diff --git a/Makefile b/Makefile
index 0ea882a581..287a92e3c6 100644
--- a/Makefile
+++ b/Makefile
@@ -19,7 +19,7 @@ GIT_COMMIT := $(if $(SOURCE_GIT_COMMIT),$(SOURCE_GIT_COMMIT),$(shell git rev-par
 OLM_VERSION := $(or $(OS_GIT_VERSION),0.0.0-$(GIT_COMMIT))
 
 GO_BUILD_OPTS := -mod=vendor
-GO_BUILD_TAGS := -tags "json1"
+GO_BUILD_TAGS := -tags "json1,containers_image_openpgp"
 
 GO_PKG := github.com/operator-framework
 REGISTRY_PKG := $(GO_PKG)/operator-registry
diff --git a/go.mod b/go.mod
index 73e54653bd..cce37c0d11 100644
--- a/go.mod
+++ b/go.mod
@@ -1,8 +1,6 @@
 module github.com/openshift/operator-framework-olm
 
-go 1.23.0
-
-toolchain go1.23.4
+go 1.23.6
 
 require (
 	github.com/blang/semver/v4 v4.0.0
@@ -13,16 +11,16 @@ require (
 	github.com/grpc-ecosystem/grpc-health-probe v0.4.37
 	github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2
 	github.com/mikefarah/yq/v3 v3.0.0-20201202084205-8846255d1c37
-	github.com/onsi/ginkgo/v2 v2.23.3
+	github.com/onsi/ginkgo/v2 v2.23.4
 	github.com/openshift/api v3.9.0+incompatible
-	github.com/operator-framework/api v0.30.0
+	github.com/operator-framework/api v0.31.0
 	github.com/operator-framework/operator-lifecycle-manager v0.0.0-00010101000000-000000000000
-	github.com/operator-framework/operator-registry v1.51.0
+	github.com/operator-framework/operator-registry v1.53.0
 	github.com/sirupsen/logrus v1.9.3
 	github.com/spf13/cobra v1.9.1
 	github.com/stretchr/testify v1.10.0
 	google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1
-	google.golang.org/protobuf v1.36.5
+	google.golang.org/protobuf v1.36.6
 	gopkg.in/yaml.v2 v2.4.0
 	k8s.io/api v0.32.3
 	k8s.io/apimachinery v0.32.3
@@ -41,21 +39,23 @@ require (
 replace google.golang.org/grpc => google.golang.org/grpc v1.63.2
 
 require (
-	cel.dev/expr v0.19.0 // indirect
+	cel.dev/expr v0.20.0 // indirect
 	github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
-	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
-	github.com/BurntSushi/toml v1.4.0 // indirect
+	github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
+	github.com/BurntSushi/toml v1.5.0 // indirect
 	github.com/MakeNowJust/heredoc v1.0.0 // indirect
 	github.com/Microsoft/go-winio v0.6.2 // indirect
 	github.com/Microsoft/hcsshim v0.12.9 // indirect
 	github.com/NYTimes/gziphandler v1.1.1 // indirect
+	github.com/VividCortex/ewma v1.2.0 // indirect
+	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
 	github.com/akrylysov/pogreb v0.10.2 // indirect
 	github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
-	github.com/containerd/cgroups/v3 v3.0.3 // indirect
+	github.com/containerd/cgroups/v3 v3.0.5 // indirect
 	github.com/containerd/containerd v1.7.27 // indirect
 	github.com/containerd/containerd/api v1.8.0 // indirect
 	github.com/containerd/continuity v0.4.4 // indirect
@@ -65,19 +65,20 @@ require (
 	github.com/containerd/platforms v0.2.1 // indirect
 	github.com/containerd/ttrpc v1.2.7 // indirect
 	github.com/containerd/typeurl/v2 v2.2.3 // indirect
-	github.com/containers/common v0.62.0 // indirect
-	github.com/containers/image/v5 v5.34.2 // indirect
+	github.com/containers/common v0.63.0 // indirect
+	github.com/containers/image/v5 v5.35.0 // indirect
 	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
 	github.com/containers/ocicrypt v1.2.1 // indirect
-	github.com/containers/storage v1.57.2 // indirect
+	github.com/containers/storage v1.58.0 // indirect
 	github.com/coreos/go-semver v0.3.1 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
+	github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/distribution/reference v0.6.0 // indirect
-	github.com/docker/cli v28.0.0+incompatible // indirect
+	github.com/docker/cli v28.1.1+incompatible // indirect
 	github.com/docker/distribution v2.8.3+incompatible // indirect
-	github.com/docker/docker v27.5.1+incompatible // indirect
-	github.com/docker/docker-credential-helpers v0.8.2 // indirect
+	github.com/docker/docker v28.0.4+incompatible // indirect
+	github.com/docker/docker-credential-helpers v0.9.3 // indirect
 	github.com/docker/go-connections v0.5.0 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
 	github.com/emicklei/go-restful/v3 v3.12.1 // indirect
@@ -85,7 +86,7 @@ require (
 	github.com/evanphx/json-patch/v5 v5.9.11 // indirect
 	github.com/fatih/color v1.18.0 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
-	github.com/fsnotify/fsnotify v1.8.0 // indirect
+	github.com/fsnotify/fsnotify v1.9.0 // indirect
 	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
 	github.com/go-air/gini v1.0.4 // indirect
 	github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
@@ -94,28 +95,36 @@ require (
 	github.com/go-jose/go-jose/v4 v4.0.5 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-logr/zapr v1.3.0 // indirect
+	github.com/go-openapi/analysis v0.23.0 // indirect
+	github.com/go-openapi/errors v0.22.1 // indirect
 	github.com/go-openapi/jsonpointer v0.21.0 // indirect
 	github.com/go-openapi/jsonreference v0.21.0 // indirect
-	github.com/go-openapi/swag v0.23.0 // indirect
+	github.com/go-openapi/loads v0.22.0 // indirect
+	github.com/go-openapi/runtime v0.28.0 // indirect
+	github.com/go-openapi/spec v0.21.0 // indirect
+	github.com/go-openapi/strfmt v0.23.0 // indirect
+	github.com/go-openapi/swag v0.23.1 // indirect
+	github.com/go-openapi/validate v0.24.0 // indirect
 	github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
 	github.com/gobuffalo/flect v1.0.3 // indirect
 	github.com/goccy/go-yaml v1.11.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang-migrate/migrate/v4 v4.18.2 // indirect
-	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+	github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/google/btree v1.1.3 // indirect
-	github.com/google/cel-go v0.22.1 // indirect
+	github.com/google/cel-go v0.23.0 // indirect
 	github.com/google/gnostic-models v0.6.8 // indirect
 	github.com/google/go-cmp v0.7.0 // indirect
+	github.com/google/go-containerregistry v0.20.3 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
-	github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect
+	github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
 	github.com/google/uuid v1.6.0 // indirect
 	github.com/gorilla/mux v1.8.1 // indirect
 	github.com/gorilla/websocket v1.5.0 // indirect
-	github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 // indirect
-	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect
+	github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.1 // indirect
+	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect
 	github.com/h2non/filetype v1.1.3 // indirect
 	github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
@@ -126,13 +135,17 @@ require (
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/kisielk/errcheck v1.8.0 // indirect
-	github.com/klauspost/compress v1.17.11 // indirect
+	github.com/klauspost/compress v1.18.0 // indirect
+	github.com/klauspost/pgzip v1.2.6 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
+	github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect
 	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
-	github.com/mailru/easyjson v0.7.7 // indirect
+	github.com/mailru/easyjson v0.9.0 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
-	github.com/mattn/go-sqlite3 v1.14.24 // indirect
+	github.com/mattn/go-runewidth v0.0.16 // indirect
+	github.com/mattn/go-sqlite3 v1.14.28 // indirect
+	github.com/miekg/pkcs11 v1.1.1 // indirect
 	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
 	github.com/mitchellh/hashstructure v1.1.0 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
@@ -141,68 +154,84 @@ require (
 	github.com/moby/sys/capability v0.4.0 // indirect
 	github.com/moby/sys/mountinfo v0.7.2 // indirect
 	github.com/moby/sys/sequential v0.5.0 // indirect
-	github.com/moby/sys/user v0.3.0 // indirect
+	github.com/moby/sys/user v0.4.0 // indirect
 	github.com/moby/sys/userns v0.1.0 // indirect
-	github.com/moby/term v0.5.0 // indirect
+	github.com/moby/term v0.5.2 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
-	github.com/onsi/gomega v1.36.2 // indirect
+	github.com/oklog/ulid v1.3.1 // indirect
+	github.com/onsi/gomega v1.37.0 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
-	github.com/opencontainers/image-spec v1.1.0 // indirect
-	github.com/opencontainers/runtime-spec v1.2.0 // indirect
+	github.com/opencontainers/image-spec v1.1.1 // indirect
+	github.com/opencontainers/runtime-spec v1.2.1 // indirect
 	github.com/openshift/client-go v0.0.0-20220525160904-9e1acff93e4a // indirect
 	github.com/otiai10/copy v1.14.1 // indirect
 	github.com/otiai10/mint v1.6.3 // indirect
-	github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
+	github.com/proglottis/gpgme v0.1.4 // indirect
 	github.com/prometheus/client_golang v1.21.1 // indirect
-	github.com/prometheus/client_model v0.6.1 // indirect
+	github.com/prometheus/client_model v0.6.2 // indirect
 	github.com/prometheus/common v0.63.0 // indirect
 	github.com/prometheus/procfs v0.15.1 // indirect
+	github.com/rivo/uniseg v0.4.7 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
+	github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect
+	github.com/sigstore/fulcio v1.6.6 // indirect
+	github.com/sigstore/protobuf-specs v0.4.1 // indirect
+	github.com/sigstore/rekor v1.3.10 // indirect
+	github.com/sigstore/sigstore v1.9.3 // indirect
+	github.com/smallstep/pkcs7 v0.1.1 // indirect
 	github.com/spf13/pflag v1.0.6 // indirect
-	github.com/spiffe/go-spiffe/v2 v2.4.0 // indirect
+	github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
+	github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect
 	github.com/stoewer/go-strcase v1.3.0 // indirect
 	github.com/tidwall/btree v1.7.0 // indirect
+	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
+	github.com/ulikunitz/xz v0.5.12 // indirect
+	github.com/vbatts/tar-split v0.12.1 // indirect
+	github.com/vbauerster/mpb/v8 v8.9.3 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
-	github.com/zeebo/errs v1.3.0 // indirect
+	github.com/zeebo/errs v1.4.0 // indirect
 	go.etcd.io/bbolt v1.4.0 // indirect
 	go.etcd.io/etcd/api/v3 v3.5.16 // indirect
 	go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect
 	go.etcd.io/etcd/client/v3 v3.5.16 // indirect
+	go.mongodb.org/mongo-driver v1.14.0 // indirect
 	go.opencensus.io v0.24.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 // indirect
-	go.opentelemetry.io/otel v1.32.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect
+	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect
+	go.opentelemetry.io/otel v1.34.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect
-	go.opentelemetry.io/otel/metric v1.32.0 // indirect
-	go.opentelemetry.io/otel/sdk v1.32.0 // indirect
-	go.opentelemetry.io/otel/trace v1.32.0 // indirect
-	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
+	go.opentelemetry.io/otel/metric v1.34.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.34.0 // indirect
+	go.opentelemetry.io/otel/trace v1.34.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.4.0 // indirect
+	go.uber.org/automaxprocs v1.6.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
-	golang.org/x/crypto v0.36.0 // indirect
+	golang.org/x/crypto v0.37.0 // indirect
 	golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 // indirect
 	golang.org/x/lint v0.0.0-20241112194109-818c5a804067 // indirect
-	golang.org/x/mod v0.23.0 // indirect
-	golang.org/x/net v0.37.0 // indirect
-	golang.org/x/oauth2 v0.28.0 // indirect
-	golang.org/x/sync v0.12.0 // indirect
-	golang.org/x/sys v0.31.0 // indirect
-	golang.org/x/term v0.30.0 // indirect
-	golang.org/x/text v0.23.0 // indirect
+	golang.org/x/mod v0.24.0 // indirect
+	golang.org/x/net v0.39.0 // indirect
+	golang.org/x/oauth2 v0.29.0 // indirect
+	golang.org/x/sync v0.13.0 // indirect
+	golang.org/x/sys v0.32.0 // indirect
+	golang.org/x/term v0.31.0 // indirect
+	golang.org/x/text v0.24.0 // indirect
 	golang.org/x/time v0.11.0 // indirect
-	golang.org/x/tools v0.30.0 // indirect
-	golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
+	golang.org/x/tools v0.31.0 // indirect
+	golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
-	google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect
+	google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb // indirect
 	google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e // indirect
-	google.golang.org/grpc v1.70.0 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f // indirect
+	google.golang.org/grpc v1.72.0 // indirect
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
@@ -219,6 +248,7 @@ require (
 	k8s.io/kms v0.32.3 // indirect
 	k8s.io/kube-aggregator v0.32.3 // indirect
 	k8s.io/kubectl v0.32.0 // indirect
+	oras.land/oras-go/v2 v2.5.0 // indirect
 	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
 	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
@@ -226,6 +256,8 @@
 )
 
 replace (
+	// pinned because of incompatibility with k8s.io/apiserver
+	github.com/google/cel-go => github.com/google/cel-go v0.22.1
 	// controller runtime
 	github.com/openshift/api => github.com/openshift/api v0.0.0-20210517065120-b325f58df679
 	github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20200326155132-2a6cd50aedd0 // release-4.5
diff --git a/go.sum b/go.sum
index 75e74c55d9..1f11d428af 100644
--- a/go.sum
+++ b/go.sum
@@ -1,5 +1,5 @@
-cel.dev/expr v0.19.0 h1:lXuo+nDhpyJSpWxpPVi5cPUwzKb+dsdOiw6IreM5yt0=
-cel.dev/expr v0.19.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
+cel.dev/expr v0.20.0 h1:OunBvVCfvpWlt4dN7zg3FM6TDkzOePe1+foGJ9AXeeI=
+cel.dev/expr v0.20.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
 cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
@@ -1307,13 +1307,15 @@ cloud.google.com/go/workflows v1.12.1/go.mod h1:5A95OhD/edtOhQd/O741NSfIMezNTbCw
 cloud.google.com/go/workflows v1.12.2/go.mod h1:+OmBIgNqYJPVggnMo9nqmizW0qEXHhmnAzK/CnBqsHc=
 cloud.google.com/go/workflows v1.12.3/go.mod h1:fmOUeeqEwPzIU81foMjTRQIdwQHADi/vEr1cx9R1m5g=
 cloud.google.com/go/workflows v1.12.4/go.mod h1:yQ7HUqOkdJK4duVtMeBCAOPiN1ZF1E9pAMX51vpwB/w=
+dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
+dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
 git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc=
 github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
 github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
-github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
-github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
+github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
 github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
 github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
@@ -1322,8 +1324,8 @@ github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxB
 github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
 github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
-github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
+github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk=
 github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
@@ -1340,6 +1342,10 @@ github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt
 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
+github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
+github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
+github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
 github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY=
 github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk=
 github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
@@ -1379,8 +1385,6 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM
 github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
 github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
 github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
-github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70=
-github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
 github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
 github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@@ -1409,8 +1413,8 @@ github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74/go.mod h1:eXthEFrGJvWH
 github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
-github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
-github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
+github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo=
+github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins=
 github.com/containerd/containerd v1.7.27 h1:yFyEyojddO3MIGVER2xJLWoCIn+Up4GaHFquP7hsFII=
 github.com/containerd/containerd v1.7.27/go.mod h1:xZmPnl75Vc+BLGt4MIfu6bp+fy03gdHAn9bz+FreFR0=
 github.com/containerd/containerd/api v1.8.0 h1:hVTNJKR8fMc/2Tiw60ZRijntNMd1U+JVMyTRdsD2bS0=
@@ -1429,16 +1433,16 @@ github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRq
 github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o=
 github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40=
 github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk=
-github.com/containers/common v0.62.0 h1:Sl9WE5h7Y/F3bejrMAA4teP1EcY9ygqJmW4iwSloZ10=
-github.com/containers/common v0.62.0/go.mod h1:Yec+z8mrSq4rydHofrnDCBqAcNA/BGrSg1kfFUL6F6s=
-github.com/containers/image/v5 v5.34.2 h1:3r1etun4uJYq5197tcymUcI1h6+zyzKS9PtRtBlEKMI=
-github.com/containers/image/v5 v5.34.2/go.mod h1:MG++slvQSZVq5ejAcLdu4APGsKGMb0YHHnAo7X28fdE=
+github.com/containers/common v0.63.0 h1:ox6vgUYX5TSvt4W+bE36sYBVz/aXMAfRGVAgvknSjBg=
+github.com/containers/common v0.63.0/go.mod h1:+3GCotSqNdIqM3sPs152VvW7m5+Mg8Kk+PExT3G9hZw=
+github.com/containers/image/v5 v5.35.0 h1:T1OeyWp3GjObt47bchwD9cqiaAm/u4O4R9hIWdrdrP8=
+github.com/containers/image/v5 v5.35.0/go.mod h1:8vTsgb+1gKcBL7cnjyNOInhJQfTUQjJoO2WWkKDoebM=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM=
 github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ=
-github.com/containers/storage v1.57.2 h1:2roCtTyE9pzIaBDHibK72DTnYkPmwWaq5uXxZdaWK4U=
-github.com/containers/storage v1.57.2/go.mod h1:i/Hb4lu7YgFr9G0K6BMjqW0BLJO1sFsnWQwj2UoWCUM=
+github.com/containers/storage v1.58.0 h1:Q7SyyCCjqgT3wYNgRNIL8o/wUS92heIj2/cc8Sewvcc=
+github.com/containers/storage v1.58.0/go.mod h1:w7Jl6oG+OpeLGLzlLyOZPkmUso40kjpzgrHUk5tyBlo=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -1453,6 +1457,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6N
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
 github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
+github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q=
+github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -1465,14 +1471,14 @@ github.com/distribution/distribution/v3 v3.0.0-rc.3 h1:JRJso9IVLoooKX76oWR+DWCCd
 github.com/distribution/distribution/v3 v3.0.0-rc.3/go.mod h1:offoOgrnYs+CFwis8nE0hyzYZqRCZj5EFc5kgfszwiE=
 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/cli v28.0.0+incompatible h1:ido37VmLUqEp+5NFb9icd6BuBB+SNDgCn+5kPCr2buA=
-github.com/docker/cli v28.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v28.1.1+incompatible h1:eyUemzeI45DY7eDPuwUcmDyDj1pM98oD5MdSpiItp8k=
+github.com/docker/cli v28.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
 github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8=
-github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
-github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
+github.com/docker/docker v28.0.4+incompatible h1:JNNkBctYKurkw6FrHfKqY0nKIDf5nrbxjVBtS+cdcok=
+github.com/docker/docker v28.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8=
+github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo=
 github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
 github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
 github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
@@ -1523,8 +1529,8 @@ github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw
 github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
 github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
-github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
+github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
+github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
 github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
 github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
 github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -1568,6 +1574,10 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
 github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
+github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU=
+github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo=
+github.com/go-openapi/errors v0.22.1 h1:kslMRRnK7NCb/CvR1q1VWuEQCEIsBGn5GgKD9e+HYhU=
+github.com/go-openapi/errors v0.22.1/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0=
 github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
 github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
 github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
@@ -1578,14 +1588,24 @@ github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwoh
 github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
 github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
 github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
+github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco=
+github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs=
+github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ=
+github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc=
 github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
 github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
 github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
+github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY=
+github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk=
+github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c=
+github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4=
 github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
 github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
-github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
+github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU=
+github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0=
+github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58=
+github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ=
 github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
 github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
 github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
@@ -1595,6 +1615,8 @@ github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
 github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U=
+github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
 github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4=
 github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs=
 github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
@@ -1623,8 +1645,9 @@ github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4er
 github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
 github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
 github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
@@ -1685,6 +1708,8 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
 github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
+github.com/google/go-containerregistry v0.20.3 h1:oNx7IdTI936V8CQRveCjaxOiegWwvM7kqkbXTpyiovI=
+github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI=
 github.com/google/go-pkcs11 v0.2.0/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY=
 github.com/google/go-pkcs11 v0.2.1-0.20230907215043-c6f79328ddf9/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -1712,8 +1737,8 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe
 github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
-github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
-github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
+github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
+github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM=
 github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
@@ -1768,20 +1793,21 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm
 github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
 github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
-github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
-github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 h1:kQ0NI7W1B3HwiN5gAYtY+XFItDPbLBwYRxAqbFTyDes=
-github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0/go.mod h1:zrT2dxOAjNFPRGjTUe2Xmb4q4YdUwVvQFV6xiCSf+z0=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
+github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
+github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.1 h1:KcFzXwzM/kGhIRHvc8jdixfIJjVzuUJdnv+5xsPutog=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.1/go.mod h1:qOchhhIlmRcqk/O9uCo/puJlyo07YINaIqdZfZG3Jkc=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99 h1:JYghRBlGCZyCF2wNUJ8W0cwaQdtpcssJ4CgC406g+WU=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99/go.mod h1:3bDW6wMZJB7tiONtC/1Xpicra6Wp5GgbTbQWCbI5fkc=
 github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ=
 github.com/grpc-ecosystem/grpc-health-probe v0.4.37 h1:x8IIa8JfqiXpBX3xtmm3jz5b9YMq4Lpc9/b+ZAfzRZI=
 github.com/grpc-ecosystem/grpc-health-probe v0.4.37/go.mod h1:uKUtQKoBUvaI4Ez1jaLCHxFYYYoYn/FYkqNgl+8hADw=
 github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg=
@@ -1793,8 +1819,8 @@ github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
 github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw=
 github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU=
 github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4=
@@ -1817,6 +1843,8 @@ github.com/itchyny/timefmt-go v0.1.6 h1:ia3s54iciXDdzWzwaVKXZPbiXzxxnv1SPGFfM/my
 github.com/itchyny/timefmt-go v0.1.6/go.mod h1:RRDZYC5s9ErkjQvTvvU7keJjxUYzIISJGxm9/mAERQg=
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
+github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs=
+github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI=
 github.com/joelanford/ignore v0.1.1 h1:vKky5RDoPT+WbONrbQBgOn95VV/UPh4ejlyAbbzgnQk=
 github.com/joelanford/ignore v0.1.1/go.mod h1:8eho/D8fwQ3rIXrLwE23AaeaGDNXqLE9QJ3zJ4LIPCw=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
@@ -1844,8 +1872,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
 github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
 github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
 github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
-github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
-github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
 github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
 github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
@@ -1868,6 +1896,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
+github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec h1:2tTW6cDth2TSgRbAhD7yjZzTQmcN25sDRPEeinR51yQ=
+github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec/go.mod h1:TmwEoGCwIti7BCeJ9hescZgRtatxRE+A72pCoPfmcfk=
 github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
 github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
@@ -1881,8 +1911,8 @@ github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN
 github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
-github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
+github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
 github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
 github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
 github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
@@ -1899,14 +1929,18 @@ github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/
 github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
 github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
+github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
 github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
 github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
 github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
-github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM=
-github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
+github.com/mattn/go-sqlite3 v1.14.28 h1:ThEiQrnbtumT+QMknw63Befp/ce/nUPgBPMlRFEum7A=
+github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 h1:yVCLo4+ACVroOEr4iFU1iH46Ldlzz2rTuu18Ra7M8sU=
 github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2/go.mod h1:VzB2VoMh1Y32/QqDfg9ZJYHj99oM4LiGtqPZydTiQSQ=
+github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
+github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
 github.com/mikefarah/yq/v3 v3.0.0-20201202084205-8846255d1c37 h1:lPmsut5Sk7eK2BmDXuvNEvMbT7MkAJBu64Yxr7iJ6nk=
 github.com/mikefarah/yq/v3 v3.0.0-20201202084205-8846255d1c37/go.mod h1:dYWq+UWoFCDY1TndvFUQuhBbIYmZpjreC8adEAx93zE=
 github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
@@ -1930,12 +1964,12 @@ github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9Kou
 github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
 github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
 github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
-github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo=
-github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
+github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
+github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
 github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
 github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
-github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
-github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
+github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
+github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -1950,26 +1984,27 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
-github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
-github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=
+github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc=
+github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
 github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
 github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0=
-github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM=
+github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus=
+github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8=
 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
-github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
+github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y=
+github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
-github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
-github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
-github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
+github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
+github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww=
+github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/openshift/api v0.0.0-20210517065120-b325f58df679 h1:aKnhCEj48qYquZk+w5t0fVthwAtifHUXTQrvwM1xJf8=
 github.com/openshift/api v0.0.0-20210517065120-b325f58df679/go.mod h1:dZ4kytOo3svxJHNYd0J55hwe/6IQG5gAUHUE0F3Jkio=
 github.com/openshift/build-machinery-go v0.0.0-20210209125900-0da259a2c359/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
@@ -1981,8 +2016,6 @@ github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs=
 github.com/otiai10/mint v1.6.3/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM=
 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
-github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI=
-github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
 github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
 github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
 github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
@@ -1998,7 +2031,12 @@ github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qR
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
+github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
+github.com/proglottis/gpgme v0.1.4 h1:3nE7YNA70o2aLjcg63tXMOhPD7bplfE5CBdV+hLAm2M=
+github.com/proglottis/gpgme v0.1.4/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glEEZ7mRKrM=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
 github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
 github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk=
 github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
@@ -2009,13 +2047,15 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T
 github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
 github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
 github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
-github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
 github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
 github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
 github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
@@ -2024,10 +2064,13 @@ github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 h1:EaDatTxkdHG+U3Bk4EUr+DZ7fO
 github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJuQe5bzQ02jGd5Qcbgb97Flm7U=
 github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc=
 github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ=
-github.com/redis/go-redis/v9 v9.1.0 h1:137FnGdk+EQdCbye1FW+qOEcY5S+SpY9T0NiuqvtfMY=
-github.com/redis/go-redis/v9 v9.1.0/go.mod h1:urWj3He21Dj5k4TK1y59xH8Uj6ATueP8AH1cY3lZl4c=
+github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM=
+github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA=
 github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
 github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
+github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
@@ -2040,12 +2083,26 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
 github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk=
+github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 h1:PKK9DyHxif4LZo+uQSgXNqs0jj5+xZwwfKHgph2lxBw=
+github.com/santhosh-tekuri/jsonschema/v6 v6.0.1/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU=
 github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8=
 github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM=
+github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc=
+github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sigstore/fulcio v1.6.6 h1:XaMYX6TNT+8n7Npe8D94nyZ7/ERjEsNGFC+REdi/wzw=
+github.com/sigstore/fulcio v1.6.6/go.mod h1:BhQ22lwaebDgIxVBEYOOqLRcN5+xOV+C9bh/GUXRhOk=
+github.com/sigstore/protobuf-specs v0.4.1 h1:5SsMqZbdkcO/DNHudaxuCUEjj6x29tS2Xby1BxGU7Zc=
+github.com/sigstore/protobuf-specs v0.4.1/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc=
+github.com/sigstore/rekor v1.3.10 h1:/mSvRo4MZ/59ECIlARhyykAlQlkmeAQpvBPlmJtZOCU=
+github.com/sigstore/rekor v1.3.10/go.mod h1:JvryKJ40O0XA48MdzYUPu0y4fyvqt0C4iSY7ri9iu3A=
+github.com/sigstore/sigstore v1.9.3 h1:y2qlTj+vh+Or3ictKuR3JUFawZPdDxAjrWkeFhon0OQ=
+github.com/sigstore/sigstore v1.9.3/go.mod h1:VwYkiw0G0dRtwL25KSs04hCyVFF6CYMd/qvNeYrl7EQ=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/smallstep/pkcs7 v0.1.1 h1:x+rPdt2W088V9Vkjho4KtoggyktZJlMduZAtRHm68LU=
+github.com/smallstep/pkcs7 v0.1.1/go.mod h1:dL6j5AIz9GHjVEBTXtW+QliALcgM19RtXaTeyxI+AfA=
 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
 github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
 github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
@@ -2067,8 +2124,10 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
 github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
 github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
-github.com/spiffe/go-spiffe/v2 v2.4.0 h1:j/FynG7hi2azrBG5cvjRcnQ4sux/VNj8FAVc99Fl66c=
-github.com/spiffe/go-spiffe/v2 v2.4.0/go.mod h1:m5qJ1hGzjxjtrkGHZupoXHo/FDWwCB1MdSyBzfHugx0=
+github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE=
+github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g=
+github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw=
+github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M=
 github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
 github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
 github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
@@ -2094,14 +2153,18 @@ github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf
 github.com/substrait-io/substrait-go v0.4.2/go.mod h1:qhpnLmrcvAnlZsUyPXZRqldiHapPTXC3t7xFgDi3aQg=
 github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI=
 github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY=
+github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
+github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
 github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
 github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
 github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
-github.com/vbatts/tar-split v0.11.7 h1:ixZ93pO/GmvaZw4Vq9OwmfZK/kc2zKdPfu0B+gYqs3U=
-github.com/vbatts/tar-split v0.11.7/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
+github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo=
+github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
+github.com/vbauerster/mpb/v8 v8.9.3 h1:PnMeF+sMvYv9u23l6DO6Q3+Mdj408mjLRXIzmUmU2Z8=
+github.com/vbauerster/mpb/v8 v8.9.3/go.mod h1:hxS8Hz4C6ijnppDSIX6LjG8FYJSoPo9iIOcE53Zik0c=
 github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
 github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
@@ -2116,8 +2179,8 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
 github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
-github.com/zeebo/errs v1.3.0 h1:hmiaKqgYZzcVgRL1Vkc1Mn2914BbzB0IBxs+ebeutGs=
-github.com/zeebo/errs v1.3.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
+github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM=
+github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
 github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
 go.einride.tech/aip v0.66.0/go.mod h1:qAhMsfT7plxBX+Oy7Huol6YUvZ0ZzdUz26yZsQwfl1M=
 go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
@@ -2137,6 +2200,8 @@ go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk=
 go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI=
 go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE=
 go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s=
+go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
+go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
 go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
 go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@@ -2146,23 +2211,25 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
 go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
 go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w=
 go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk=
 go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4=
 go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g=
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE=
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 h1:rgMkmiGfix9vFJDcDi1PK8WEQP4FLQwLDfhp5ZLpFeE=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0/go.mod h1:ijPqXp5P6IRRByFVVg9DY8P5HkxkHE5ARIa+86aXPf4=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I=
 go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
 go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
 go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI=
-go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U=
-go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg=
+go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
+go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls=
 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs=
 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8=
 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0/go.mod h1:
@@ -2171,12 +2238,12 @@
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7Z go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0= go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU= go.opentelemetry.io/otel/exporters/prometheus v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU= go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 h1:CHXNXwfKWfzS65yrlB2PVds1IBZcdsX8Vepy9of0iRU= @@ -2190,30 +2257,32 @@ go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= -go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs= go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo= -go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= 
-go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= +go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= +go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= +go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= @@ -2249,8 +2318,10 @@ golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= -golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= +golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2319,12 +2390,15 @@ golang.org/x/mod v0.10.0/go.mod 
h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= -golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2397,8 +2471,9 @@ golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= -golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= +golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2432,8 +2507,8 @@ golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= -golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc= -golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98= +golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2455,8 +2530,10 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= -golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= +golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2558,8 +2635,11 @@ golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2577,8 +2657,10 @@ golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= -golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= +golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2599,8 +2681,10 @@ golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= -golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2684,8 +2768,9 @@ golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= -golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= -golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= +golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2694,8 +2779,9 @@ golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 h1:LLhsEBxRTBLuKlQxFBYUOU8xyFgXv6cOTp2HASDlsDk= +golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= @@ -2944,8 
+3030,8 @@ google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:cc8bqMqt google.golang.org/genproto v0.0.0-20240205150955-31a09d347014/go.mod h1:xEgQu1e4stdSSsxPDK8Azkrk/ECl5HvdPf6nbZrTS5M= google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= -google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= -google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= +google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb h1:ITgPrl429bc6+2ZraNSzMDk3I95nmQln2fuPstKwFDE= +google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:sAo5UzpjUwgFBCzupwhcLcxHVDK7vG5IqI30YnwX2eE= google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= @@ -3008,8 +3094,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe/go. google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014/go.mod h1:SaPjaZGWb0lPqs6Ittu0spdfrOArqji4ZdeP5IC/9N4= google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:YUWgXUFRPfoYK1IHMuxH5K6nPEXSCzIMljnQ59lLRCk= google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e h1:YA5lmSs3zc/5w+xsRcHqpETkaYyK63ivEPzNTcUUlSA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f h1:N/PrbTw4kdkqNRzVfWPrBekzLuarFREcbFOiOLkXon4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= @@ -3035,8 +3121,8 @@ google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= -google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 
v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -3073,9 +3159,9 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= -gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -3195,6 +3281,8 @@ modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= modernc.org/z v1.7.0/go.mod h1:hVdgNMh8ggTuRG1rGU8x+xGRFfiQUIAw0ZqlPy8+HyQ= +oras.land/oras-go/v2 v2.5.0 h1:o8Me9kLY74Vp5uw07QXPiitjsw7qNXi8Twd+19Zf02c= +oras.land/oras-go/v2 v2.5.0/go.mod h1:z4eisnLP530vwIOUOJeBIj0aGI0L1C3d53atvCBqZHg= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= diff --git a/manifests/0000_50_olm_00-catalogsources.crd.yaml b/manifests/0000_50_olm_00-catalogsources.crd.yaml index e12a282f3f..728cf917c2 100644 --- a/manifests/0000_50_olm_00-catalogsources.crd.yaml +++ b/manifests/0000_50_olm_00-catalogsources.crd.yaml @@ -979,11 +979,10 @@ spec: configured to use *must* be using the file-based catalogs in order to utilize this feature. type: object required: - - cacheDir - catalogDir properties: cacheDir: - description: CacheDir is the directory storing the pre-calculated API cache. + description: CacheDir is the (optional) directory storing the pre-calculated API cache. type: string catalogDir: description: CatalogDir is the directory storing the file-based catalog contents. diff --git a/microshift-manifests/0000_50_olm_00-catalogsources.crd.yaml b/microshift-manifests/0000_50_olm_00-catalogsources.crd.yaml index e12a282f3f..728cf917c2 100644 --- a/microshift-manifests/0000_50_olm_00-catalogsources.crd.yaml +++ b/microshift-manifests/0000_50_olm_00-catalogsources.crd.yaml @@ -979,11 +979,10 @@ spec: configured to use *must* be using the file-based catalogs in order to utilize this feature. type: object required: - - cacheDir - catalogDir properties: cacheDir: - description: CacheDir is the directory storing the pre-calculated API cache. + description: CacheDir is the (optional) directory storing the pre-calculated API cache. type: string catalogDir: description: CatalogDir is the directory storing the file-based catalog contents. 
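The point of the two CRD hunks above (and of the staging/api copies below) is that grpcPodConfig.extractContent no longer lists cacheDir as required, so a catalog image serving file-based catalog content can opt into extraction without shipping a pre-calculated API cache. A minimal sketch of what the relaxed schema accepts, assuming the v1alpha1 Go types in github.com/operator-framework/api (GrpcPodConfig, ExtractContentConfig) mirror the schema shown here; the image reference is a hypothetical placeholder:

    // Sketch only: assumes the v1alpha1 Go types mirror the CRD schema above.
    package main

    import (
        "fmt"

        "github.com/operator-framework/api/pkg/operators/v1alpha1"
    )

    func main() {
        src := v1alpha1.CatalogSource{
            Spec: v1alpha1.CatalogSourceSpec{
                SourceType: v1alpha1.SourceTypeGrpc,
                Image:      "quay.io/example/catalog:latest", // hypothetical image
                GrpcPodConfig: &v1alpha1.GrpcPodConfig{
                    ExtractContent: &v1alpha1.ExtractContentConfig{
                        // catalogDir remains required by the schema.
                        CatalogDir: "/configs",
                        // cacheDir is omitted: with this change the API server
                        // accepts extractContent without a pre-calculated cache.
                    },
                },
            },
        }
        fmt.Println(src.Spec.GrpcPodConfig.ExtractContent.CatalogDir)
    }

Before this change, such an object would have been rejected at admission because cacheDir was in the required list.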
diff --git a/staging/api/.github/dependabot.yml b/staging/api/.github/dependabot.yml
index a3e5facb7b..2e4405444c 100644
--- a/staging/api/.github/dependabot.yml
+++ b/staging/api/.github/dependabot.yml
@@ -7,4 +7,12 @@ updates:
   - package-ecosystem: "gomod"
     directory: "/"
     schedule:
-      interval: "weekly"
\ No newline at end of file
+      interval: "weekly"
+    groups:
+      k8s-dependencies:
+        patterns:
+          - "k8s.io/*"
+          - "sigs.k8s.io/*"
+      golang-x-deps:
+        patterns:
+          - "golang.org/x/*"
\ No newline at end of file
diff --git a/staging/api/crds/operators.coreos.com_catalogsources.yaml b/staging/api/crds/operators.coreos.com_catalogsources.yaml
index e6509764d0..62efa56727 100644
--- a/staging/api/crds/operators.coreos.com_catalogsources.yaml
+++ b/staging/api/crds/operators.coreos.com_catalogsources.yaml
@@ -975,11 +975,10 @@ spec:
                       configured to use *must* be using the file-based catalogs in order to utilize this feature.
                     type: object
                     required:
-                    - cacheDir
                     - catalogDir
                     properties:
                       cacheDir:
-                        description: CacheDir is the directory storing the pre-calculated API cache.
+                        description: CacheDir is the (optional) directory storing the pre-calculated API cache.
                         type: string
                       catalogDir:
                         description: CatalogDir is the directory storing the file-based catalog contents.
diff --git a/staging/api/crds/zz_defs.go b/staging/api/crds/zz_defs.go
index 127f91ba02..2d69fab3f0 100644
--- a/staging/api/crds/zz_defs.go
+++ b/staging/api/crds/zz_defs.go
@@ -85,7 +85,7 @@ func (fi bindataFileInfo) Sys() interface{} {
 	return nil
 }
 
-var _operatorsCoreosCom_catalogsourcesYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff...
[old and new gzip-compressed go-bindata payloads omitted: several kilobytes of escaped binary bytes encoding the catalogsources CRD YAML, regenerated so the embedded copy matches the cacheDir change above]
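For context on why that blob churns: zz_defs.go is generated in the go-bindata style, embedding crds/*.yaml as gzip-compressed byte slices, so any edit to the catalogsources CRD forces the embedded copy to be regenerated. A minimal sketch of the decompression side of that pattern, using only the standard library; the function name is illustrative, not the generated file's own helper:

    package main

    import (
        "bytes"
        "compress/gzip"
        "fmt"
        "io"
    )

    // inflate gunzips an embedded asset back into its original YAML bytes.
    func inflate(compressed []byte) ([]byte, error) {
        zr, err := gzip.NewReader(bytes.NewReader(compressed))
        if err != nil {
            return nil, err
        }
        defer zr.Close()
        return io.ReadAll(zr)
    }

    func main() {
        // Round-trip a stand-in payload the way a generated accessor would.
        var buf bytes.Buffer
        zw := gzip.NewWriter(&buf)
        if _, err := zw.Write([]byte("kind: CustomResourceDefinition\n")); err != nil {
            panic(err)
        }
        if err := zw.Close(); err != nil {
            panic(err)
        }

        yaml, err := inflate(buf.Bytes())
        if err != nil {
            panic(err)
        }
        fmt.Print(string(yaml))
    }

The \x1f\x8b\x08 prefix visible at the start of the removed line is the standard gzip magic number plus the deflate method byte, which is how the embedded data can be recognized as a compressed stream.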
0\x9b\x67\x43\xb1\x92\x2e\x4c\xe8\xb2\x4d\xf6\xb3\x4b\xa6\x8c\xb3\x34\x4f\x0f\xc9\xb3\x86\x57\x2c\x47\xbe\x0d\xf4\xb0\x3d\x95\xe7\x19\x35\x6c\x79\x29\x69\x9a\x62\x26\x16\x8b\x81\x6b\xb6\x60\x18\xf3\x52\x90\x18\xea\xf1\xf6\x43\x1f\xa7\x55\x00\x1f\x43\xb8\x0c\x1b\xed\x45\x74\xa7\xf6\x1c\x97\x28\x94\x3a\xaf\x5d\x54\xe5\xbc\xeb\x0c\x2c\x55\x5a\xb5\x81\xc0\x75\x66\x25\xb9\x8a\xff\x28\x05\xca\x19\x5f\xaa\x32\xda\x12\xf9\x5f\x9b\x79\xdc\x7c\x76\xb5\x02\x17\xdd\x00\x55\xef\xa0\x2f\x69\x64\x44\xc9\x32\xc8\x18\x03\xcf\xdb\xd9\xc7\xb6\x2d\xd6\x48\x5d\x29\x24\xc7\x54\x41\x0f\xbb\x6b\x25\x16\xd3\x5f\x47\x51\x24\x0d\xdf\x1a\x03\x7a\xfe\xec\xab\x56\xbc\x2b\xde\x6b\x7c\xa9\x8c\xd2\xfc\x70\x34\xfd\x27\x9d\xfe\xfa\x71\xcf\xfd\xe3\xd9\xf4\x2f\xff\x6f\x72\xf8\xf1\x69\xe5\xcf\x8f\xcd\xc1\x95\xbb\x25\xe7\xb2\xd5\x70\xd8\x9d\xb5\x5e\xac\xf2\xf8\x31\xf1\xc1\x5c\xe7\x32\x87\x09\xf9\x9e\x26\x0a\x26\xe4\x47\x8e\xe7\xe4\x0d\x81\xd6\x1e\x5d\x61\x24\x9b\xc7\x66\xd4\xc7\xed\xaf\xe0\x94\xda\xdf\x71\xd3\x6d\x53\x41\xfb\x01\xc9\x5b\x16\x2a\x8c\x90\x57\x30\xd0\x66\x7d\x2d\x84\x98\xc1\x35\x4d\xb3\x04\x66\x91\x48\x0f\x8a\xe7\xb7\x78\xc8\x3d\xff\xba\x07\xf6\xec\x7d\xb0\x38\xf2\x71\xef\xc3\xd4\xfd\xeb\xa9\xff\x69\xff\xbb\xbd\x7f\xcd\x5a\x9f\xef\x3f\x3d\xc0\xd0\xde\x02\xd5\x3e\x7e\x98\x96\x68\x37\xfb\xf8\x74\xff\xbb\xca\xb3\xfd\x5d\x48\xb8\x9d\x5c\x95\xd2\x6c\x7a\xd1\x58\xd9\xae\x51\x94\x6d\xca\xd2\x4a\x69\xb6\x4b\xc5\x5b\xb0\xe5\x1b\x9a\xbd\x87\x05\x48\xe0\x51\xb7\x09\xe9\x78\xeb\x13\xb2\x17\x9b\x23\x1c\x73\xf3\xf6\xbd\xf0\x2a\x8b\xa7\xee\x20\x2b\xbe\xf3\xdc\xbd\xb8\x28\x6a\x33\x26\xa7\x16\xe9\x36\x29\x45\xc9\x1d\xca\x7a\xd9\xeb\xed\xbb\x80\x38\x6d\x10\x27\xa7\x65\xd4\xdd\x08\x43\x8f\x39\x8e\xac\xc1\xab\x4d\x83\xe8\x81\xe4\xfd\x64\x5f\xde\x12\xfa\xdb\x39\x48\xb1\xce\xd1\x3d\xf8\xfb\x3b\x7e\xb2\x76\x97\xd1\xfd\xe4\xac\x51\xb9\xec\x2b\x5d\xfc\x78\xf2\xc2\xe2\x0c\x32\x26\x94\x28\x57\x22\x89\x15\xc9\x39\xfb\x25\x07\x72\xf2\xa2\xa8\x4a\xc4\x78\x94\xe4\x78\xdf\xd1\x8f\x3f\x9e\xbc\x50\x33\x42\xfe\x0e\x11\x35\x7a\xfd\x55\x4b\x08\x27\x96\x04\x7c\xf7\xf6\xf5\xff\x41\x0b\x00\x7e\xe9\xae\x1b\x71\x05\x47\x12\x46\xad\x45\xca\x1e\xbe\xa6\x57\x1b\xdd\x88\x33\x8a\x68\xd6\x6c\x63\x20\xce\x6e\xc7\x6d\x90\xee\x0a\x92\x4c\x61\xf2\x11\x51\xb9\x74\xab\x31\x03\xda\x2c\x08\x4c\x4f\x77\x8e\x73\x9f\x4f\x85\xf9\x6a\xa3\xe2\xa2\x23\xc1\x39\x44\x18\x99\x60\x84\xd0\x3e\x1c\xa2\xfa\xfe\xa6\x80\xbf\x53\xa2\xdd\x0c\x70\x2e\xc7\xf4\xfc\xc3\x1b\x48\x6f\x9f\xd0\x0d\x45\xbe\x73\xa2\x3c\xce\x78\x04\x55\x3b\xdf\xe2\x68\xfc\x36\x73\x70\x70\xbb\x73\x96\xb0\xb5\xde\x51\x23\x5a\xa3\x25\x3a\x83\xdf\x77\xd8\xae\xeb\x61\x99\x5b\x96\x8d\x8d\x9c\x57\x34\xa0\x16\xfe\xe4\x15\x55\x64\x0e\xc0\xd1\x9e\x6b\xed\x76\xc0\x1d\xce\x43\x69\x6d\xcd\xb3\xa9\x16\xd3\x06\xfd\xaa\x03\x72\xdd\x50\x6b\x31\x1f\xd4\xd6\x76\x34\xd8\x20\x70\xb5\x5a\xef\x82\x81\x2a\x6f\x3b\xf2\xd2\xd3\x60\x2f\x75\xb3\x56\x56\x9b\xb3\x33\xb2\x16\xc7\x36\xfe\xb5\x3d\x25\xa3\x6e\xd7\x0c\x4f\x5a\xa0\x7b\xb1\x46\xcd\x23\xe6\x68\xb7\xf9\x0c\xe4\x25\xeb\x21\x7c\xbc\xaf\xbf\xdf\x8b\xb5\xbc\x7a\x7f\x7a\x8c\xf9\x79\xe6\x03\xef\x9f\x40\xec\xaf\x4a\x15\xb7\xef\xd1\x89\x6c\xa8\xd1\xd1\xdd\x13\x74\x26\xe4\xf8\x41\x32\x29\xb4\x88\x44\x87\xd3\xa9\x35\x65\x06\x41\xdb\x96\x63\x34\xa4\x8f\xa1\xf2\x86\xe5\x63\xb5\xac\x34\xa5\x85\x34\xe4\x5a\xfb\x2d\x9f\x17\x77\x89\x95\xbd\x3b\xad\x8f\xfc\xfb\x3f\x8f\xfe\x7f\x00\x00\x00\xff\xff\x1e\xa8\xb9\x4c\x11\x5b\x01\x00") +var _operatorsCoreosCom_catalogsourcesYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x7d\x6b\x73\x1c\xb7\xb1\xe8\x77\xfd\x0a\x94\xee\xa9\x12\xa9\xec\x2e\x25\xe7\x94\x93\x30\x8e\x5d\x0c\x25\xeb\xb2\xac\x07\x4b\xa4\x9d\xba\x51\x74\xaf\xb0\x33\xbd\xbb\x30\x67\x80\x31\x80\x21\xb9\x8e\xf3\xdf\x6f\xa1\x01\xcc\x63\x77\xe7\xc9\x97\xe4\x03\x7c\xb0\xc5\x9d\x19\x3c\x1a\xdd\x8d\x7e\x83\x66\xec\x27\x90\x8a\x09\x7e\x48\x68\xc6\xe0\x5a\x03\x37\x7f\xa9\xd9\xc5\x9f\xd5\x8c\x89\x83\xcb\xe7\x8f\x2e\x18\x8f\x0f\xc9\x71\xae\xb4\x48\xdf\x83\x12\xb9\x8c\xe0\x05\x2c\x18\x67\x9a\x09\xfe\x28\x05\x4d\x63\xaa\xe9\xe1\x23\x42\x28\xe7\x42\x53\xf3\xb3\x32\x7f\x12\x12\x09\xae\xa5\x48\x12\x90\xd3\x25\xf0\xd9\x45\x3e\x87\x79\xce\x92\x18\x24\x76\xee\x87\xbe\x7c\x36\x7b\xfe\xa7\xd9\x57\x8f\x08\xe1\x34\x85\x43\x12\x51\x4d\x13\xb1\xb4\x63\xa9\x99\xc8\x40\x52\x2d\xa4\x9a\x45\x42\x82\x30\xff\x4b\x1f\xa9\x0c\x22\x33\xc8\x52\x8a\x3c\x3b\x24\x3b\xdf\xb1\xfd\xf9\xb9\x50\x0d\x4b\x21\x99\xff\x9b\x90\x29\x11\x49\x8a\xff\x76\x6b\xb4\xc3\x9e\xe1\xb0\xf8\x7b\xc2\x94\xfe\x61\xfb\xd9\x6b\xa6\x34\x3e\xcf\x92\x5c\xd2\x64\x73\xc2\xf8\x48\xad\x84\xd4\x6f\xcb\xe1\xcd\x70\x11\xd5\x4a\x46\xf6\x31\xe3\xcb\x3c\xa1\x72\xe3\xdb\x47\x84\xa8\x48\x64\x70\x48\xf0\xd3\x8c\x46\x10\x3f\x22\xc4\x41\xca\x75\x35\x25\x34\x8e\x11\xfa\x34\x39\x95\x8c\x6b\x90\xc7\x22\xc9\x53\x5e\x0c\x65\xde\x89\x41\x45\x92\x65\x1a\x21\x7c\xbe\x02\x92\x49\xd0\x7a\x8d\x20\x21\x62\x41\xf4\x0a\xfc\xd8\xc5\x57\x84\xfc\xac\x04\x3f\xa5\x7a\x75\x48\x66\x06\xc2\xb3\x98\xa9\x2c\xa1\x6b\x33\x9b\xca\x5b\x76\x9b\x5e\xd8\x67\x95\xdf\xf5\xda\x4c\x5d\x69\xc9\xf8\xb2\x6d\x2a\xe6\xbd\xfe\x73\xb0\xa0\x39\x5f\x67\xdb\x53\xd8\xf8\xb1\xef\xf8\x59\x3e\x4f\x98\x5a\x81\xec\x3f\x89\xe2\x93\xad\x39\x9c\xee\x78\xd2\x30\x91\x4a\xa7\x9e\x6e\x66\x91\x04\x24\x99\x73\x96\x82\xd2\x34\xcd\xb6\x06\x38\x5a\x6e\xaf\x31\xa6\xda\xff\x68\x5f\xba\x7c\x4e\x93\x6c\x45\x9f\xbb\x1f\x55\xb4\x82\x94\x96\xf8\x20\x32\xe0\x47\xa7\x27\x3f\xfd\xf1\x6c\xe3\x01\xa9\x43\xa7\x86\xe7\x84\x29\x42\x89\x84\x4c\x28\xa6\x85\x5c\x1b\x68\x1d\x9f\xfd\xa4\x26\xe4\xf8\xfd\x0b\x35\x21\x94\xc7\x05\xe1\x91\x8c\x46\x17\x74\x09\x6a\xb6\x35\x57\x31\xff\x19\x22\x5d\xf9\x59\xc2\x2f\x39\x93\x10\x57\x67\x61\xc0\xe3\x61\xb2\xf1\xb3\x81\x7f\xe5\xa7\x4c\x9a\x31\x75\x85\x90\x6d\xab\x30\xb3\xda\xef\x1b\x2b\xfc\x6d\xba\xf1\x94\x10\x03\x18\xfb\x25\x89\x0d\x67\x03\x85\x48\xe1\xa8\x0e\x62\x07\x4d\x8b\x2c\x4c\x19\x88\x48\x50\xc0\x2d\xaf\x33\x3f\x53\xee\x56\x39\xdb\xea\xfc\x0c\xa4\xe9\xc8\x30\x84\x3c\x89\x0d\x4b\xbc\x04\xa9\x89\x84\x48\x2c\x39\xfb\xb5\xe8\x5d\x11\x2d\x70\xd8\x84\x6a\x50\x9a\x20\x5d\x73\x9a\x90\x4b\x9a\xe4\x80\xc0\xde\xea\x3b\xa5\x6b\x22\xc1\x8c\x4b\x72\x5e\xe9\x11\x3f\x51\xdb\x73\x79\x23\x24\x10\xc6\x17\xe2\x90\xac\xb4\xce\xd4\xe1\xc1\xc1\x92\x69\xcf\xec\x23\x91\xa6\x39\x67\x7a\x7d\x80\x7c\x9b\xcd\x73\xc3\x50\x0f\x62\xb8\x84\xe4\x40\xb1\xe5\x94\xca\x68\xc5\x34\x44\x3a\x97\x70\x40\x33\x36\xc5\xc5\x70\x64\xf8\xb3\x34\xfe\x5f\x12\xaa\x1c\x70\x13\x0d\x36\xa8\x81\x78\xbe\x3b\x70\xb3\x0c\x3f\xb6\x88\x69\x3b\xb4\x8b\x2d\xf7\xc4\xfc\x64\xc0\xf8\xfe\xe5\xd9\x39\xf1\x33\xb2\xfb\x66\xb7\xa8\x7c\x75\x07\x84\xfc\x6e\x19\xc8\x32\xbe\x00\x69\xbf\x5c\x48\x91\x62\xaf\xc0\xe3\x4c\x30\xae\x2d\xd7\x48\x18\x70\x4d\x54\x3e\x4f\x99\x56\x88\xd6\xa0\xb4\xd9\xc8\xed\x8e\x8f\xf1\x70\x24\x73\x20\x79\x66\x88\x37\xde\x7e\xe5\x84\x93\x63\x9a\x42\x72\x4c\x15\xdc\xfb\xde\x99\x3d\x52\x53\xb3\x21\xbd\x77\xaf\x7a\xf4\x6f\x7f\xb0\x45\xf5\x84\xf8\x33\xbb\xd7\xcb\x4d\x6c\x82\x58\x9e\xb0\xeb\x4c\x20\x2d\xdc\xc1\x34\x1a\xc7\x12\xd4\x8e\x07\xdd\x58\x67\xda\x91\xfd\xdc\x22\xdf\x4a\x28\x83\x04\x54\x93\x77\xaf\xdf\x90\x88\x72\x9
2\x2b\x30\x24\x1c\x09\xce\x0d\x96\x69\x41\xa8\x39\x73\xa7\x70\xcd\x14\x62\xa5\x84\x25\x53\x5a\xae\xb7\xf7\xd6\xb4\xef\x85\x4c\xa9\x3e\x24\xdf\xf8\xd7\xa6\x38\x84\x90\x84\x65\xdf\x1e\x7e\x93\x09\xa9\xbf\xdd\xf9\xe1\x3b\x9e\xac\xcd\xe0\x31\xb9\x5a\x01\x27\x67\x05\x64\xc8\xdf\x2a\x7f\xbc\x92\x59\xb4\x7b\xe0\x93\x25\x17\xd2\x7f\x6d\xd0\xfa\x24\xa5\x4b\x20\x0b\x06\x09\x12\x9a\x82\x1d\x5c\xad\x05\x2d\x88\x15\xfc\x16\x6c\xf9\x86\x66\x63\x61\x7d\xec\x3b\x30\x33\x30\x93\xaa\x8a\x2d\xe5\x43\x2d\x90\xa2\xcc\xe2\xcd\x3f\x69\x74\x41\xa8\x1b\x3c\xa5\xd9\x54\x21\x35\x77\x00\xbe\x1f\xfc\x8e\x7d\xa7\x66\x47\xca\x9f\x4f\x1c\x97\x1e\x0c\xa1\x2a\x10\x06\x7f\x5b\x0a\x65\x9d\xf0\x7d\xb3\xeb\x4c\xed\x31\xc6\x52\x66\xd1\xa9\x88\xed\xb2\xc7\xee\xe2\xab\x6a\x27\x04\xae\x33\xa1\x40\x91\x98\x2d\x16\x20\x0d\xe7\x14\x97\x20\x25\x8b\x41\x91\x85\x90\xb8\xb5\x99\x88\x91\x4d\x14\x5b\x5d\x93\x47\x4e\xc5\x0e\xb6\x49\x06\xd1\x00\x0a\x2d\x16\xc3\xbb\x70\x7b\x27\x53\x22\x1d\x3c\xc6\x34\xba\x40\xf5\x68\xbd\xfb\xe9\x06\xe8\x8e\xdc\xcb\x1e\xd1\x9d\x28\xea\x38\xdc\x13\x65\x40\xf2\x44\x15\x7d\xee\x5e\x7f\xe7\x94\xfb\x4c\xdb\x34\x2e\x62\x38\xea\x98\xfe\xd6\x12\x5e\xe0\x1f\x73\x50\xf8\x79\x31\x55\x14\x6c\xe2\x3c\x41\xde\x97\x27\xf5\x5d\x6e\x5a\x47\xcf\xb5\xf4\x5d\x8f\x7d\x0f\x16\x20\x25\xc4\x2f\x72\x83\xea\x67\xc5\xac\x1c\xe7\xb3\x3f\xbf\xbc\x86\x28\x6f\x22\xc7\xc6\xa5\x37\x20\x7e\xb5\x19\x85\xc3\x41\x02\x24\xb9\x62\x49\xe2\x66\x64\x58\x96\x7f\x60\x40\x82\x12\xa0\x81\xa0\xb2\x07\x8b\xa2\x9a\xa9\xc5\xba\x73\x00\x03\xd1\x02\xe6\x70\x6d\x84\x1b\xd4\x16\x91\x90\xd8\x82\x41\x4c\xe6\x6b\x27\xc7\x18\xa6\x3e\x21\xf3\x5c\x13\xa6\x51\xc8\x89\x56\x42\xa8\xcd\x43\x74\xbb\x51\xbb\xb5\x38\xaf\x4b\x26\x50\x46\x25\x82\x83\xe1\x86\xa9\x91\x4c\x1c\xbd\x56\x86\x9f\xe1\xca\xcb\xcf\xd8\xa6\x60\xb1\xdd\x52\x73\xe2\x15\xdb\xe5\x29\xc2\x0c\x73\xc5\xf4\x0a\xff\x58\x1a\x75\xc9\xc8\xc7\x2a\x4f\xcd\xa0\x57\xc0\x96\x2b\xad\x26\x84\xcd\x76\x08\x4d\x9b\xcd\x20\x20\xd0\x68\x55\x99\x56\x0a\xa0\x15\xa1\x49\xe2\x97\x50\xc5\x5a\x2b\x81\xa4\x46\x56\x24\x7b\x5e\x98\xec\x1c\xc5\x09\x82\x93\x42\x82\xd9\x44\xbc\x9d\xdb\x35\x21\xa0\xa3\xd9\xfe\xa4\xb3\xfb\x48\xa4\x59\xae\xc1\x48\xc0\x79\x6a\xb6\x96\x69\xa3\x83\x59\xc1\x57\x8a\x7c\x69\x21\x05\x89\x9b\xb8\x57\x5b\xec\x89\x6e\xf8\x1f\x8d\xe3\x5d\x3c\x7f\xb3\x3d\xb6\xc0\x7d\xec\x35\x13\x33\x1c\xb3\x40\x42\xf8\xa5\x54\x47\x2b\xa7\x2c\x45\x42\x4a\x50\x99\xe0\xa6\x67\xfb\xe4\x65\xb9\xb6\xbf\x9a\x77\x3a\xc7\x33\x9d\xee\xa9\xfd\x72\xb3\x57\x6c\xb9\xf2\x7b\x4d\x25\xe0\x6f\x75\x1c\xe9\xda\x72\xcb\x4a\xa8\x94\xb4\x8b\x8e\x98\x86\xb4\x83\x91\x90\x11\xd4\x4f\xc8\x11\x27\x90\x66\x7a\x5d\x41\xec\x0a\x8a\x69\x90\x69\x01\x48\xc4\x42\x64\x7b\xca\x02\x81\xa5\x59\xc2\x22\xa6\x1d\x9a\x93\x67\x3d\xc6\xdb\x33\x94\x40\x98\x36\x87\x06\xe1\x62\x2a\xb2\xfd\x19\x39\x22\x3c\x2f\x18\x4f\xdb\x14\xb8\x28\x66\xe0\x3a\x32\xd3\x52\xa2\xec\xab\x9b\x1f\xf5\x63\xdf\xb6\x35\x0b\xf9\xdb\x6d\xea\xe6\x0f\xbc\x07\x11\x9a\xd7\x2d\xd4\x3a\x5f\xed\x7b\x90\xf8\xb7\xfd\x1c\xfa\xbc\xbd\x79\xda\x5b\xca\x51\x90\x40\xa4\xcd\x69\x08\x32\x9d\x10\xaa\x94\x88\x98\xd1\x0a\x4b\xdc\xaf\x13\x94\x5d\x49\x37\xec\xc9\x50\xf8\x93\xc1\xeb\x27\x68\x78\xa8\xd3\x77\xdf\xef\xb6\xa0\x91\x30\xa3\xe3\x2c\x36\xa0\x52\xe3\xbb\xf3\x35\x3e\x7d\xa2\x48\x42\xe7\x90\xec\x50\xda\x9b\x5a\x7f\xe2\x2f\x5b\x4f\x36\xd0\xb0\xa0\x5e\x0c\xa1\x6c\x9b\xd8\x50\x59\xb7\x33\x71\x14\x78\x62\x8e\x2a\xa3\xda\x53\xc6\x95\xb3\xef\x4c\x08\x25\x17\xb0\xb6\x76\x38\xca\x0b\x53\xdc\xa0\x29\x60\xc7\x12\xec\x81\x6e\xf0\xee\x02\xd6\xd8\x61\x93\x0d\xa9\xa5\xab\xa1\x78\x67\xdb\x10\x0e\x50\xb6\xa9\x99\xe8\xc0\x2f\x46\x00\x68\x38\x69\x
d8\x76\x01\xad\xe2\xf3\xae\xb6\x65\xa5\x46\x74\xc7\xfd\xc0\x4d\xc2\x13\xd8\xe3\x03\xcd\xb2\x84\xc1\x6e\x53\x53\x7b\x6b\xd5\xfa\xda\x9a\x87\xde\x8d\xd6\x35\x90\x40\x4c\x7b\x5f\x98\xeb\x2c\xbe\x3f\x51\x16\x5f\x0d\xdf\x59\xb1\xcc\x1a\x5a\x14\x20\x1b\x19\x8e\xb4\xb6\xfd\x44\x13\x56\xda\xb2\x15\x0a\x1b\x27\x7c\x42\xde\x0a\x6d\xfe\xf7\xf2\x9a\x29\x23\x63\xbe\x10\xa0\xde\x0a\x8d\x7f\xce\xc8\x2b\x6d\x49\xef\x75\x4f\xbe\x5c\xb6\xd1\x7b\x60\xd7\x77\xdf\x3b\x70\xc4\x2d\x13\x35\x10\xae\x1a\x5d\xd5\x8c\x9c\x58\x71\xb0\xf0\x02\x30\x45\x4e\xb8\x51\x0a\x2c\xe4\x06\x0f\x85\x76\x77\xec\xdb\x0d\x99\xe6\x0a\xad\xa6\x5c\xf0\x29\x8a\x51\x3b\xc7\xb4\x1b\x64\xc6\xad\x6e\xd1\x2d\x0e\xdf\x3c\xf4\x2b\xb4\xd1\xbd\xd6\x93\xca\xc7\x83\xc7\xad\x0c\xb6\xa2\x97\x28\xda\x33\xbe\x4c\x0a\x21\x7e\x42\xae\x56\x2c\x5a\x59\xed\x71\x0e\xd6\x39\x90\x49\x30\x12\x03\x55\x86\xf9\x9b\x5f\x96\x20\x87\xa3\xfe\xb9\x51\x0d\xec\xf8\xd6\xb5\x91\xd0\x08\x62\x12\xa3\xca\x62\xad\xec\x54\xc3\x92\x45\x24\x05\xb9\x04\x92\x99\xa3\x7f\x1c\xc2\x0f\x3b\x89\x6d\x1b\x7c\x1e\x57\x07\x1c\x41\x61\x84\x5c\x4f\x2f\xf2\x39\x48\x0e\x1a\xd4\xd4\xc8\x27\x53\x37\x7b\x2d\x52\x16\xf5\xee\xec\x56\xba\x41\x39\xeb\x7b\xa3\xb7\xdd\x93\x88\x85\x3a\x62\x10\xb1\x82\x88\x15\x44\xac\x20\x62\x05\x11\xab\x77\x0b\x22\xd6\x8d\x87\x0f\x22\x56\x10\xb1\xee\x5d\xc4\xaa\x75\x91\xd2\x6c\x68\x0f\xd6\x2e\x37\xc2\x10\xf8\x0f\x6b\xd0\xdd\xb4\xfc\xa1\xc0\xe7\x43\x58\xea\x26\x40\x23\xc7\x9c\xb9\xc3\xe9\x1c\xcd\x86\xcc\xfa\xeb\x25\xe5\x4b\x20\xcf\xa7\xcf\x9f\x3d\x1b\x62\x20\x74\xe8\xdc\xeb\x8b\x85\x0b\x4e\x60\x5c\xff\xf1\xab\x8e\x2f\x6e\xb0\x2b\x4d\xfe\x92\xfb\x71\xd4\x39\xce\x53\xf8\x66\x6a\x22\x72\x83\x2f\x0d\x8f\x31\x2e\x34\x49\x41\x13\xda\x2d\x93\x55\xcd\xee\x2c\x85\x49\xe1\xf4\x46\xb6\xe3\x22\x84\xbc\x53\x30\x26\x82\x3b\xd7\x8b\xd9\xfc\xee\xcd\x1d\xb5\x82\x08\xa8\x8d\x5c\x99\x83\x59\x45\xb7\x2b\x50\x13\x25\x52\x33\x6b\xc6\xb5\x67\x62\x66\x09\xe0\x37\x86\xec\xc1\x6c\x39\x23\x71\x8e\xdd\x52\xee\x42\x9e\xf6\xed\x6a\xd5\x5a\x69\x48\xbb\x7d\x81\xe6\x30\x94\xf8\x3f\x03\x16\x2d\xd7\xa6\x33\xb8\x04\xae\x73\x9a\x24\x6b\x02\x97\x2c\xd2\x05\xfc\x30\x42\x8b\x69\xd5\x0b\x52\x03\xc4\xe8\xfe\xa2\xf3\x74\x8b\x42\xbb\x0e\xa9\x21\x92\xef\x56\xdf\x7d\x78\x4e\x8d\x02\xde\xbb\x95\xcc\x1a\x75\x42\x6d\xfa\xb5\x6e\x5b\xfc\x27\x22\xf7\xbb\xf7\xdd\x5e\x36\x32\xf8\xfc\x19\x70\xe6\x8c\x13\xad\x9c\xc3\x4b\x48\xe7\x7c\xdb\x5e\xe9\x0e\x97\x97\x5d\x7b\x8d\x6a\xc4\xa2\xe7\x80\x7a\x05\xd6\x49\x79\xf4\xf6\x45\x3f\x88\x11\x17\x1c\x70\x2e\x32\x91\x88\xe5\xba\xba\xbd\x36\x44\x9a\xa5\x99\x77\xe2\x52\xa2\xf2\xb9\x13\xc1\x0d\xce\xbf\xdd\xc0\x87\xe0\x1a\x0a\xae\xa1\x60\xb7\xc0\x16\xec\x16\xc1\x6e\x11\xec\x16\xfd\x5a\xb0\x5b\xdc\x78\xf8\x60\xb7\x08\x76\x8b\xe0\x1a\xda\x6e\x41\xc4\xea\x6e\x41\xc4\x6a\x6d\x41\xc4\x2a\x5a\x10\xb1\x82\x88\x15\x44\xac\x20\x62\x05\x11\xeb\xbe\xba\xb9\xa9\x6b\xe8\x46\x53\x18\x37\x78\x26\xe2\x1b\x24\x6f\x65\x22\x6e\xc9\xdd\xb2\x36\xfd\x48\x4c\x13\x11\x51\xed\x32\xaa\xcd\x27\xce\x0b\xa5\x68\x6a\xdd\x14\x13\xf2\xab\xe0\x60\xb3\x59\x0c\x79\xa0\xb3\x40\xe8\x15\x48\xf3\xfa\x9e\xda\x6f\x4d\x21\x08\xb9\x5f\x21\xf7\x2b\xe4\x7e\x35\xb6\xcf\x26\xf7\x6b\x45\x95\xc5\x5b\x7b\x34\x36\xa7\x82\x55\x78\xd2\x39\xc8\xf4\x77\x9a\x09\x66\xd0\xdd\xa1\x23\xd6\x03\x29\x51\xca\x42\x26\x76\x0e\x7f\x88\x4f\xeb\xf0\x70\x6a\x35\x2e\x8a\xc6\x31\xc4\x24\x03\x39\xb5\x28\x2a\xc8\x82\xf1\x78\xc7\x5a\x3d\x7c\xba\xd9\xc3\x1d\xa6\x62\xd5\xd7\xd1\xeb\x9b\xbb\xc9\xc7\xaa\x4f\x64\x84\x53\xb1\xea\x19\xad\x1d\x82\x9f\x45\x76\xd6\x50\x0d\x7d\x4a\xb4\x73\x28\xfe\xd0\x53\x47\x1f\xae\x66\xa3\x72\xec\xdd\x8f\x23\xcd\x4f\x83\x54\xa1\x23\xa7\x8f\xff\
x92\x83\x5c\x63\xdd\x80\x52\xed\x2c\xca\xce\xb8\x88\x18\xa6\x48\x44\x95\x3d\x56\x87\x88\xca\x27\x0b\x9b\x35\xc9\xf3\x24\x99\xd8\x7e\x36\x89\xd5\xb3\x39\xc4\x03\x2e\xcc\xf3\xc1\x16\xb1\x81\x26\x9a\x71\x36\x90\xf1\x5e\x58\xb2\xb9\x4f\x9b\x5d\x59\xdb\x98\x37\x1d\xda\x6d\xd9\x69\x3b\xdc\xe1\x4d\x1f\xec\x1f\xb7\x6d\xac\x02\x33\x4a\x7d\xb9\xb1\xbe\xde\x02\x93\x1b\xd8\x15\xf1\xe5\xc1\x93\xb9\x1d\xdb\x22\x19\x6f\x5f\x24\xa3\x6d\x8c\x64\x94\x9d\x91\x8c\xb5\x35\x92\x1b\xd8\x1b\xc9\x38\x9b\x23\xd9\xc4\x36\xb3\x43\x4e\xf0\xbd\x1b\xf3\x23\xb9\x99\x72\x3e\xde\x0c\x49\x6e\x81\xb0\xaa\xe3\x57\x8a\x78\xdd\x9d\x5d\x92\xf4\xb5\x4d\x22\x59\xd5\xcc\x93\xf7\xbd\x2f\xe3\x4c\x93\xe4\x96\x76\xc5\x19\xed\x18\xda\xc2\xee\xcb\x58\x49\x1e\xde\x60\xd9\x3a\x05\x37\x7c\x6f\x0b\xdf\xa8\xd1\x6f\x60\x15\x24\x37\xb2\x0c\x92\xf1\xd6\x41\x72\x53\x64\xbf\x35\x2b\xe1\xad\x76\x85\x72\xd2\x6b\x8c\x43\xbb\x81\xb4\x35\x98\x02\x2b\xc3\x5a\xe9\x02\x6b\x94\x2d\xc8\xbf\x8d\x10\x81\x88\xf9\x1f\x92\x51\x26\x95\xd1\x6e\x9c\x5d\xbb\xfa\xcc\x99\xef\x2a\xdd\x0c\x9e\x00\x16\x69\x33\x67\xfc\x25\x4d\x8c\x90\x63\x83\x7e\x9d\x0d\xc2\xcc\x65\x53\x84\x9c\x90\xab\x95\x50\x56\x22\x29\xea\xcc\x3d\xbe\x80\xf5\xe3\x49\x2f\xf3\x40\xbd\x55\x29\xfb\xf1\x09\x7f\x6c\x45\xa7\x2d\xba\x2c\xe4\x2c\xc1\x93\x35\x79\x8c\xcf\x1e\xdf\xb6\x8c\x3a\x42\x3e\xaa\x16\xf3\x1d\x2b\x7e\x8c\x22\xa7\x9b\x1a\xb8\x49\x0d\xfb\x7e\x80\xf5\xd8\x40\x84\x41\x38\xff\xa6\x36\xa2\x17\xa8\x11\xcd\x8c\xee\x5e\xc8\x4d\x68\x51\xb5\x22\x93\xb3\x51\x59\x73\x15\x4b\x92\x01\xa3\xcd\x81\x68\x7a\x01\xe8\xe1\xc1\xda\x8e\x8a\xc5\x68\x70\x13\xdc\xa2\x0e\x8e\x64\x50\xc6\x17\x21\x4c\x84\xb8\xc8\x33\x8f\x7a\xbe\x70\xe8\x80\x21\x19\x8f\x44\xea\x63\xea\x6d\x5c\xab\xa1\x0a\x47\x2f\x53\x5b\xee\xd4\xfe\x8e\x03\xe3\x11\xe0\x4c\x13\x9f\x6a\x8a\xf9\x27\x42\x15\xf9\x84\x42\x25\x27\x7b\xf8\xe1\xfe\xa7\x21\x21\x05\x05\x00\xad\xf5\x55\xe4\xc8\x59\x8a\x82\x96\x15\xfb\x5f\x01\xdb\x36\x88\x0d\x18\xda\x57\x8a\xab\x02\xc3\xd6\xc2\x23\x7b\x94\x6b\xb6\x5f\x16\xc4\x23\x88\x07\x28\x25\xc7\x82\x3f\xd1\x76\x7e\x9e\xaf\xf9\x0e\x86\x44\x1d\x14\x70\x2f\x3d\x7e\xd6\x5b\x60\xb7\x3c\x86\x05\xcd\x13\xed\x0a\xcf\x1a\xd6\x87\x27\xfd\x80\x11\xce\xbd\xbf\xc4\x09\xfc\x0b\x21\xe7\x2c\x8e\x81\x63\x8a\x83\x9f\xfe\x5c\xf8\x84\xa0\x12\xdd\x0d\x67\xab\xed\xf1\x90\x61\x8f\x12\x25\x26\x9b\x3d\x46\x45\x61\x5a\x43\x45\x58\x3e\xb1\x36\x00\x61\xca\x00\xb5\xa1\x52\x62\xf3\x0a\x99\xa3\xcd\x39\x68\x5a\x31\x3f\x3b\x56\xab\x08\x70\x3a\x47\x7b\x78\x9d\xa0\x4f\x78\xc5\xe6\x42\x16\x40\x75\x2e\x81\x2c\xa9\x06\xb2\x87\x9f\x58\x8f\x86\xdb\x83\x1e\xd5\xa8\x7c\xbb\x97\x70\xa4\x9b\xf3\xe1\xd1\xe1\x60\x4c\x3d\x00\x2f\xde\x1c\x34\xb0\xe3\x5e\xec\x98\x0b\x1d\x38\xf2\x17\xcb\x91\xb7\x90\xfe\x36\x98\xf2\x56\xa7\x81\x2f\x37\xb7\x2f\x88\x2f\x73\x7f\x8f\xc7\x03\x7b\x4b\xac\x01\xd1\x46\xfb\xfa\x29\xa9\xd2\xb6\x88\x09\x71\xe3\xec\x8a\x3e\x5d\x10\xd1\x0a\x7b\x88\xbd\xd3\x38\xe7\xee\x7e\x04\x5f\xc1\xdb\x0d\x6b\x79\xd8\x66\x6c\xc2\x80\x31\xbd\x82\x27\x38\x28\x74\x45\x40\x11\x87\x52\x19\x06\x7b\x1d\xb2\x14\x4c\x19\x2c\xcd\xac\x3c\xde\x4c\x22\x2c\xfb\x46\xff\x47\x0a\x94\x2b\xf2\xd8\x87\xc2\x3c\x51\xe5\x1b\x8f\x07\xf1\x00\x5f\x22\xb4\x18\x7b\xef\xdf\xff\xd9\xaf\x95\x05\x2d\x87\x0e\xfe\xa6\xe0\x6f\xaa\xb6\xe0\x6f\xda\x9e\x44\xf0\x37\x35\xb5\xe0\x6f\x1a\x35\x7e\xf0\x37\xd5\x5b\xf0\x37\x05\x7f\x53\xf0\x37\x05\x7f\x53\xf0\x37\x05\x7f\x53\xdf\x8f\x6e\xc3\xdf\x54\xaa\x41\xf7\xa1\x47\x57\x55\x56\x17\x4f\x6f\x2f\x9e\xa3\x9a\x45\x65\xf6\xac\x7f\xcb\xfe\xeb\xa1\x94\xea\xaa\x1a\x7c\x53\x95\xba\xaa\xa0\x6f\x59\x30\x06\x6b\xd4\x8d\xfa\x73\xa1\x61\x6f\x8d\x71\x4b\xaa\xf5\xef\xdc\xb8\x54\x09\xf4
\xbd\x0f\x72\x38\xf7\x29\x3f\xee\x2e\xcb\x39\x94\xf9\x40\x31\xd9\xf3\xf6\xdf\x7d\xb3\xe1\x5c\xe8\xfa\x43\xae\xd9\xb4\x7c\xa3\x08\xa4\x46\x33\xb5\xaf\x7a\x37\x04\xe8\x5e\x67\x29\xed\xa1\x2e\x0f\xa9\x48\x7c\x29\xb1\xcd\xf0\x7f\x90\xb5\xd9\x32\xe5\xee\xfa\xc4\x14\x3e\x99\x73\x6e\xc4\x23\xc1\x5d\xb6\xcb\x80\x99\xd8\xa3\xc5\x9a\x9e\x1d\x55\x5a\x65\x0a\xd7\x88\x1a\x55\xb9\x4d\x95\x44\x09\xaa\xed\xcd\xa1\xae\x36\x95\xe0\xce\x70\x6f\x7e\xb1\xfd\x0c\x98\x44\x41\xb0\x08\x4f\x56\xac\x68\x08\xad\xbe\x44\x1a\xad\x4e\x96\x29\xdc\x47\x9a\x24\xe2\x6a\xc8\xf9\x34\x90\x22\x46\x17\x51\xec\x8d\xbd\x57\x83\xab\x2d\x6e\x64\x15\xf4\x95\xd5\x43\x49\xc6\xd6\x16\x4a\x32\x7e\x1e\x25\x19\x2b\xbe\xd9\x6a\x6d\xc6\x6e\x58\x61\xed\xc6\x3b\xad\xcd\x48\xc8\x3f\xdc\x45\xa4\x12\xac\x43\x35\x4f\x34\xcb\xca\x6c\x6c\x65\x77\x28\xb1\x2a\xf5\xc2\x65\x4d\xd6\xa9\xd7\xcc\x86\x46\xab\xce\xa1\x36\xa8\x1c\xc7\xc3\xec\x6e\x85\xdc\xd4\x66\x16\xa2\xfd\xdd\x16\x44\xf4\xba\xb6\x4d\xcf\x64\x0f\x9d\x75\xd6\x8b\xff\xbd\x70\x57\x5a\x57\xfd\xf2\x8a\xec\x99\xd3\x31\x59\x3b\x4f\x76\x8d\x11\xd6\x8e\xd5\x1e\x03\x58\x3b\xd8\x25\x78\x01\x75\xc9\x2e\x81\x97\xa7\xef\x9e\xda\xdf\xf7\x32\xf1\xa6\xfc\xd0\xa3\xf7\x9b\x48\x18\x7d\xb8\xf6\x50\xc9\x60\xe3\xbc\xef\x31\xc2\x0e\x89\xe0\x9b\xca\x29\xfb\x6d\xb7\x4c\xd0\x63\x10\x4b\xd2\x3e\x8d\xb5\xb2\xd1\xa5\x2c\xd0\xd9\xcb\x1d\x66\x0b\x0e\x49\x49\x1b\x66\x15\x1f\x91\x8a\x36\xb6\xa2\xe8\xdd\xa6\xa0\xdd\x69\xfa\xd9\x97\x53\xf8\xf3\x81\xdd\x7f\x5f\x40\xd5\xaa\xcf\xc4\xdd\x17\xca\x56\x35\xb5\x87\x2a\x5b\x75\xe7\xee\xbc\x2f\xae\x7a\xd5\xbd\xba\xef\xee\xc7\x75\xf7\x85\x55\xaf\x7a\x10\x57\xdd\x67\x5e\xc7\xea\xee\x5c\x74\xa1\x48\xd4\xc3\xd4\xe1\x1c\xea\x86\x1b\x4f\x55\x0f\xea\x7e\x7b\x50\xd7\xdb\xc3\xbb\xdd\x46\xc9\x2a\x37\x75\xb7\x0d\x26\x93\x9b\xba\xd9\xc6\xa4\x11\x8c\xc3\xe7\xfb\x4b\xe5\xba\xe7\xbc\x81\xcf\x23\x85\xeb\x81\x92\x05\x1e\x2a\x51\xe0\x6e\x93\x04\x1e\x20\x65\xeb\x9e\xd2\xb5\x3e\xaf\x94\x80\xa1\x62\xc8\x20\xe1\xe3\x66\xbc\x74\x8c\x84\x30\x32\x2d\x6b\x24\x3f\xbd\xcf\x74\xac\xdf\x01\x4b\x1d\x95\x86\x15\xb8\xea\x03\x71\xd5\xdb\x4b\xbb\xba\xbf\x94\xab\xc0\x5b\x9b\xdb\x8d\x79\xeb\xc8\xd4\xaa\x5b\xb3\xfe\xdf\x4d\x4a\xd5\x7d\xa7\x53\xdd\x41\x2a\xd5\x43\xa4\x51\xdd\x41\x0a\x55\xf0\x99\xf4\x6c\xc1\x67\xd2\xb7\x05\x9f\x49\x53\x0b\x3e\x93\xcd\x16\x7c\x26\xc1\x67\x12\x7c\x26\xc1\x67\xb2\x3d\x60\xf0\x99\x04\x9f\x49\xbf\x16\x7c\x26\xf7\xe3\x33\x19\x9a\x96\x34\x0e\x97\x1f\x26\x1d\xe9\x7e\x53\x91\x6e\x3f\x0d\xe9\x01\x53\x90\x7e\x67\x06\x97\xc1\xe9\x46\xe3\xd0\xfc\x73\x49\x33\xfa\x3c\x52\x8c\x1e\x3c\xbd\xe8\xa6\xa9\x45\xb7\x93\x56\x34\x00\xdb\x47\xe2\x79\x26\xe2\x23\xae\xd9\x4d\x2f\xd0\xaa\x22\x60\xd3\x2d\x5a\xf4\x52\xb0\x98\x64\xb9\x76\x17\xf7\x84\x9b\xb4\x3a\x71\xe0\x7e\x6e\xd2\xaa\x6d\x5e\xb8\x4e\xab\xad\x7d\x36\xd7\x69\x35\xed\x59\xb8\x53\xab\xde\xc2\x9d\x5a\xe1\x4e\xad\x70\xa7\x96\x6d\xe1\x4e\xad\x70\xa7\x56\xa8\x71\x18\x6a\x1c\x86\x1a\x87\xfd\xbf\x0a\x35\x0e\x9b\x5b\xa8\x71\x38\xa4\x85\x1a\x87\xbd\x47\x0f\x35\x0e\x43\x8d\xc3\x61\x03\x87\x1a\x87\x24\xd4\x38\x0c\x35\x0e\xbf\xe0\x1a\x87\xe1\x4e\xad\x2f\xe2\x12\x97\x70\x83\xcb\x80\xb1\x3f\xaf\x1b\x5c\xc2\x9d\x5a\xe1\xee\x96\x1e\x2d\xdc\xa9\xf5\x05\xb1\xe3\x70\xa7\xd6\x97\xcc\x91\xc3\x9d\x5a\x81\x2f\x87\x3b\xb5\x8a\x16\xee\xd4\x0a\x77\x6a\x05\x7f\x13\xb6\xe0\x6f\x0a\xfe\xa6\x21\x2d\xf8\x9b\xba\x5a\xf0\x37\x05\x7f\x53\xd3\xe8\xc1\xdf\x14\xfc\x4d\xc3\x06\x0e\xfe\x26\x12\xfc\x4d\xc1\xdf\xf4\x05\xfb\x9b\xc2\x9d\x5a\xe1\x4e\xad\x70\xa7\x56\x31\x72\xb8\x53\x2b\xdc\xa9\x85\x2d\xdc\xa9\xd5\x63\x84\x70\xa7\xd6\x97\x7a\xa7\x56\x2d\x6f\xea\xcb\xbd\x58\x6b\xf8\x32\xc2\xed\x5a\xe1\x76\xa
d\x86\x16\x6e\xd7\x0a\xb7\x6b\xed\x6a\xe1\x76\xad\x70\xbb\x56\x4b\x0b\x95\x22\x7b\xb6\x50\x29\xb2\x6f\x0b\x95\x22\x9b\x5a\xa8\x14\xb9\xd9\x42\xa5\xc8\x50\x29\x32\x54\x8a\x0c\x95\x22\xb7\x07\x0c\x95\x22\x43\xa5\xc8\x7e\xed\xe1\x1d\x70\xff\x33\x2a\x45\x86\xdb\xb5\x3e\xcb\xab\x60\xc2\x3d\x30\x1d\xed\xf3\xb9\x07\x26\xdc\xae\x15\x6e\x80\x71\x2d\xdc\xae\xf5\x19\xb3\xd4\x70\xbb\x56\x77\xfb\x7c\xb8\x6a\xb8\x5d\x2b\xf0\xd6\x5a\x0b\xb7\x6b\x85\xdb\xb5\x8a\x16\x6e\xd7\x0a\x3e\x93\xc6\x16\x7c\x26\x24\xf8\x4c\x8a\x16\x7c\x26\xbd\xc6\x0d\x3e\x93\xe0\x33\x09\x3e\x93\xf6\x49\x07\x9f\x49\xf0\x99\x74\x0e\x1e\x7c\x26\xbf\x7b\x9f\x49\xb8\x5d\x2b\xdc\xae\xb5\xa3\xfd\xce\x0c\x2e\xe1\x76\xad\x70\xbb\xd6\xef\xf3\x76\x2d\xb8\xd6\x92\x46\xfa\x58\x70\x0d\xbc\x31\x27\xa9\x2f\x3a\xbf\xac\xf5\x66\x4e\xd7\x05\x5b\xe6\xd2\xe9\xfd\xcb\xf7\xa7\xc7\x24\xa2\x9a\x26\x62\x49\x4e\x45\x6c\x4d\xdd\xf8\x45\xf1\x73\x0a\x9a\xc6\x54\xd3\xc2\x4b\x62\xf4\xe3\x4b\x16\x23\x53\x8d\xe1\x9a\xb0\x94\x2e\xc1\x30\xaf\xc6\x49\xe4\x0a\x08\x25\x57\x90\x24\xd3\x0b\x2e\xae\x38\xb9\x04\xa9\x2a\xec\xfa\x93\xc8\xd2\x4f\x44\x81\xbc\xb4\x37\x54\xc1\x75\x66\x10\x8d\x69\x7b\xee\xfb\x99\x54\x87\x2b\x83\xf8\x8f\xed\xd3\x33\x0c\x7a\x6e\xbb\xec\xa9\x58\x3b\x2e\xd3\xcc\xe9\xa9\x11\xec\x9f\x1a\xa2\xce\x95\xcf\x38\x58\xb0\x04\xa6\x73\xaa\x20\xf6\xe3\x2a\x43\x6b\x42\xc6\x76\x6e\xb9\x66\x09\xfb\x15\xdc\x69\x62\x8d\xe1\x4d\x68\xd3\x43\xe0\xe8\x36\x84\x4c\xfd\x3c\x5e\xb0\x26\x23\x46\x1f\x83\x45\x44\xa3\x15\xbc\x60\xad\x2a\x7c\x0d\xa9\x8e\xdd\x07\xde\x24\xb1\x27\x32\x2b\x07\xed\x93\x98\x49\x64\x42\x6b\xa2\xb4\x90\x1e\x72\x99\x84\x69\x44\x93\x28\x4f\x90\xe5\x1c\x9d\x9e\xd8\x41\xbb\xaf\x5d\xeb\x20\xa5\x72\xfd\x03\x26\xef\x3f\xf1\xd3\xdf\x3d\xe7\xed\xdd\x46\x01\x14\xed\x91\x37\x99\x76\x0a\xa9\x90\xeb\x73\x2a\x97\x70\x63\x12\x7e\x53\xe9\x6b\x93\x80\xff\xeb\xd5\xbb\x37\x2f\xdf\xbc\x3e\x79\x73\x72\xee\xf8\xb2\x77\xce\x6d\x92\xf6\xac\xe2\x00\x52\x62\xa1\xdd\x14\x49\xc2\x52\xa6\x8b\xaf\x2c\x0d\x36\xab\xcc\x96\x6f\x63\x82\x61\xce\x35\x4b\xc1\x7a\xe2\xa8\xd6\x46\xa4\x31\xf4\x91\x02\x68\xbc\xe7\x2d\xa5\x17\x60\x98\x2b\x59\xe6\x54\x52\xae\xc1\x1f\x05\x4c\xdb\x8f\x62\x41\x94\x70\x8a\x3c\x53\xa5\xd7\x4e\x81\xb6\x69\x55\xa7\xa2\x99\xa5\x60\x0f\x2b\x7a\x69\x2f\xe2\x5a\x08\xc3\xbf\xcd\xa6\xa6\x22\x66\x0b\x16\x59\xcb\x10\x49\x69\x5c\xa4\x02\x39\xc5\x02\x64\x71\xfc\x95\x0b\x6e\xa3\xbe\x4d\x30\x03\xbf\x64\x52\x70\x54\x98\x2e\xa9\x64\x74\x9e\x40\xe1\x8f\x54\xa0\xed\x78\xe5\x82\x38\x99\xaf\x35\x34\xb3\x25\x3b\x82\xdb\x0d\x77\x83\x5b\x73\x7f\x8f\x1a\xfb\x39\x2f\x73\xf6\x4a\x61\xc5\x7c\xcf\x5c\x32\x47\x0c\x8a\x39\xe6\x27\x21\xce\x23\x0f\x3a\xa1\x33\xc9\xac\xf6\x47\x0b\x8c\x71\xcc\x98\x2a\x92\xe6\xe6\xa4\x36\x52\x90\x52\x6c\x9e\xc0\xc4\xc8\x3a\xac\x39\xd9\xa8\xec\x63\x0e\x06\xca\xd8\x13\x4a\x20\x97\x60\xf0\xcd\xe0\xb1\x15\x75\x01\x8c\x20\x24\xf0\x32\x35\x6a\xc5\x19\xef\xc2\x35\xe7\x71\xe4\x1c\xf8\x27\x0b\xb2\x16\xb9\xac\xb1\xff\x15\x35\x78\x8c\xd4\xdb\x38\x11\x97\x20\x88\x3c\x68\x42\x62\x30\x8a\x03\xe3\xe6\x24\x5a\x0a\x11\x1b\xfd\x41\x8a\x6b\x96\xe2\x28\x8e\x00\x8a\x5d\x9b\xaf\x49\x2c\x72\xeb\x0d\x45\x34\x31\x2c\xdf\x9d\x56\x19\x8d\x2e\xcc\x1c\xb0\xe3\xb6\xd4\xce\x03\x9d\x66\x07\xf8\x96\xfb\xaf\xfb\x52\xcd\x7e\x56\x82\x97\x4e\xf0\x62\x59\xb3\x5e\xbb\xcb\x14\x99\x83\xd2\x53\x58\x2c\x84\xd4\x7f\x35\xfb\x9b\x73\x24\x1a\x2e\x0a\x00\x7a\x04\xc2\x80\x07\x84\x36\xa6\xe8\xd4\xa9\x5e\xc8\x1d\x0c\xa4\x82\x7a\x4d\x2c\x30\x33\xf4\x2e\xf9\x21\xf9\xbf\x7b\xff\xfa\xc3\x6f\xd3\xfd\xef\xf6\xf6\x3e\x3c\x9b\xfe\xe5\xe3\x1f\xf6\xfe\x35\xc3\x7f\x3c\xdd\xff\x6e\xff\x37\xff\xc7\x1f\xf6\xf7\xf7\xf6\x3e\xfc\xf0\xe6\x
d5\xf9\xe9\xcb\x8f\x6c\xff\xb7\x0f\x3c\x4f\x2f\xec\x5f\xbf\xed\x7d\x80\x97\x1f\x7b\x76\xb2\xbf\xff\xdd\x7f\x35\x4c\x88\xf2\xf5\xbb\x45\x2b\x11\xf7\xca\x6e\x9e\xf6\x39\x8f\x6a\x22\x1d\xe3\x7a\x2a\xe4\xd4\x7e\x70\x48\xb4\xcc\x77\x8b\xb2\x46\xee\xed\x72\x0d\xf7\x3d\x0f\xde\x56\xfa\xda\xf0\x18\xb9\xab\x10\x9d\xb9\xd2\xcc\xa6\xe0\xec\x99\x95\xee\x16\x66\xdb\xbd\xd8\xdf\x7c\xc4\x9d\xed\xe8\x11\xe5\x79\xf7\xe5\x13\xe5\x23\x31\x36\xfa\xdf\x48\x52\xb6\x1c\xbf\x6d\xac\x1e\x32\xd2\x30\x13\x4c\xe7\x16\x66\x92\x09\xc9\xf4\xfa\x38\xa1\x4a\xbd\xa5\x29\xdc\x74\x43\x4e\x16\xa5\x1a\x36\x31\xf4\x6c\xce\x1f\x77\x40\xbb\xd0\x18\x37\x64\x33\xc0\x4f\x16\xa8\x87\x54\xfa\xf1\x40\xf5\xdf\x16\x84\xe9\x49\x5c\x48\xf2\x2b\x48\xe1\x2e\xc5\x94\x60\x75\x99\xc6\x11\xdc\x67\xed\xfb\xd0\x02\x36\x05\x51\x8e\x60\x33\xf2\xd1\xb5\xd1\x28\x16\x6c\x79\x53\xd0\x9d\xed\xea\x94\x44\x94\x9b\x85\xe2\x35\xaf\x0b\xf2\x29\x81\x25\x8d\xd6\x9f\xcc\x82\x3f\x49\x30\x53\x34\x3a\xe0\x27\xab\x1c\xd4\xc4\x7f\x17\x85\xc4\x14\x01\x86\x37\xff\x32\xfe\xb3\x55\x18\xbd\xf6\xdd\x38\x13\x89\xf5\x19\x32\x11\xcf\xcc\x1e\xcc\x36\x56\x8b\x2c\xb4\x78\x58\x88\x12\x1f\x9e\x7e\xdc\x7a\xd3\x59\x33\xb5\xb0\x4a\x65\x95\x38\x64\x8e\x5c\xbf\x4d\xae\xf1\x00\x21\x47\x71\xca\xd0\x04\x4b\xf6\x4e\xcf\x8e\xf6\x6b\x2b\x37\x52\x8e\x3d\x87\x63\x01\x3e\x0e\xc8\x0c\xa4\x4a\x63\x2a\x9e\xa1\x98\x02\x6a\x49\x18\x73\x40\xfd\x5c\x0c\x80\xd1\xbe\xda\x92\xf6\xec\x27\x7b\x76\x44\x3e\x19\x09\x39\x61\x1c\xec\x1e\x64\x92\x5d\xb2\x04\x96\x66\x26\x95\x40\x06\xef\x92\xd9\xbd\xa7\x4c\x99\x53\xaa\x8e\xde\x29\xe6\x30\x5b\xb4\x6e\xc1\x5b\x77\x70\xbb\x98\x92\x8a\x75\xef\x89\xc2\xe9\x79\x99\xb8\x94\x17\x6a\x6f\x21\x3a\xf0\x85\x90\x91\x39\xcd\x77\xc0\x51\xbb\x24\x7f\x03\x9a\x76\x81\x13\xcd\x3f\x85\x16\x49\x95\xa1\xbd\x5a\x87\x57\xb4\x52\x49\x61\x46\xde\x19\x24\xbc\x62\x0a\x26\x85\xd4\xbb\xb3\x0b\x8f\xe1\x57\xb4\x59\x4e\xac\x74\x7b\x86\xff\x5c\x5b\x6f\x93\x33\xc7\x20\xba\xa3\x1c\xd5\x44\x2f\x44\x82\x32\x7c\x83\x71\xfb\x15\x8b\xad\xe8\x03\x52\x0a\x39\xb3\xa5\x0e\xac\x1e\x2c\x92\xb8\xe5\x94\x2c\xd4\x71\x23\xb2\xa0\xb1\xc9\xe2\x17\x47\x0e\xe6\xd0\x66\x37\x98\x1b\x70\xa3\x2e\xaf\x6a\x51\x2c\xa1\x45\x20\x7a\x23\xb0\x3e\x82\x2d\x42\x62\x96\x41\xe7\x22\xd7\x16\x1f\x2c\xfb\x58\x88\x9c\xc7\xc4\x70\xc6\x43\xb2\xd2\x3a\x53\x87\x07\x07\xe5\xd1\x3d\x63\xe2\x20\x16\x91\x3a\x88\x04\x8f\x20\xd3\xea\xc0\x13\xf2\x41\x26\xe2\xa9\xff\x63\x4a\x3d\x1d\x1e\x8c\x65\x9c\x84\x00\xcf\x5b\x6e\x85\x9d\x12\xbb\xda\x96\x17\x4a\x60\xee\x7c\x49\x8b\xc4\x45\x70\x36\x9e\x8b\xf5\xbb\x82\xcb\xf7\x8b\xbb\x8c\x0b\xc1\xbf\xc2\x48\x9f\xa8\x6a\xd7\xed\x27\x47\x9b\x65\xb9\xc3\x96\xdc\xdf\x34\x7b\xee\x79\xa9\x91\x86\xcb\x55\xa0\x14\xa4\x35\xc5\x6b\x8e\x8d\xba\x64\x9f\x18\x36\xc8\xd7\xc4\xb0\x6a\xed\xee\xdc\xb6\x26\xc9\x36\xcd\x7e\x65\xe4\x26\xac\xf1\xf1\x4d\xe1\x76\x9b\xc0\x62\x01\x91\xfe\xb6\x62\x26\x2a\xaa\x54\x14\x6e\xad\x6f\xfc\xbf\xbe\x6d\x3e\xe5\x7b\x79\xa0\xfa\x85\x9f\xd8\x29\xb5\xdb\xbe\x87\xd9\xbc\x5f\x62\x8f\x1b\xf2\x8b\x05\x9e\x1d\x0c\xd5\x7b\xf4\x4d\x3b\x43\xaa\xf5\x4a\x38\xb9\x30\x49\x6a\x2f\x77\x06\x60\x20\xbf\xa9\x1c\x08\xce\x04\x5b\x7a\x03\x81\xbc\x15\xae\xdc\x0f\x4c\xc8\x29\x5e\x3d\x5d\xfe\x82\x27\xf2\x5b\x61\x0b\xff\x74\x54\x72\xe9\x69\xb0\xed\x8c\xdf\x19\x06\xcf\x1f\xca\x70\x1e\x0b\x98\x5a\x38\x4f\x49\x58\x55\x3f\x58\x2b\x60\x2f\x60\xdd\x09\x55\x77\xf8\xb9\x50\x22\xf4\x33\x4d\x4a\x1c\xf5\x9a\x81\x8d\x94\xf8\xab\xab\x0e\x21\xd2\x39\xe3\x76\x2a\x76\x60\xbf\xcf\x38\xb6\xdf\x0f\x1e\xe3\x9f\xdd\x93\xe8\x09\xed\x7e\x31\x45\xc3\x40\xfe\x6e\x40\xbc\x50\xe1\x79\xee\x02\xe9\xae\xb8\xa0\x4a\x30\xd0\xcb\x5f\x72\x9a\xcc\xc8\x0b\x2b\x5e\
x23\xf0\xec\x4f\x5d\xe4\x66\xbb\xd8\xf2\xc7\x5f\xb1\x24\x8e\xa8\x8c\x51\xb3\xb2\xec\x87\x28\x61\x11\x87\x7a\xe9\xad\xa3\x6f\xcf\x00\x4b\xe4\xb1\x97\xc2\x93\x8c\x4a\xcd\xa2\x3c\xa1\xd2\x30\x7c\x58\x0a\xd9\x11\x1f\xdf\x73\x33\x4b\x6c\x3e\x83\x48\xf0\xb8\xc3\x73\x38\x6c\x57\xcf\x37\x3b\xaf\x6e\x2f\xca\x6d\x20\x99\xab\x38\xc3\x52\xd8\x24\xaf\xbd\x9a\x56\xdc\x31\x96\x58\x78\x66\x57\xf0\x96\x89\x15\xae\x8c\x18\x57\x2d\x8c\xc5\x94\xcf\x3e\xd8\xaf\x1c\x38\x05\xb5\xcf\xc8\xdf\x8b\x98\xf8\xae\x10\x24\xa6\xbd\x1f\x0a\xcd\x38\x6e\xbe\x8e\x14\xdd\x4e\x96\x6c\x64\x21\x24\x5c\x82\x24\x7b\xb1\xc0\x6f\xb0\xc0\xd5\xfe\x8c\xfc\xd3\x28\x83\x6d\x6e\x18\xdb\x38\x2c\x6d\x89\x24\x47\xd8\x45\x52\x88\x04\xb4\xdb\x53\x45\x9e\x91\x3d\x5b\x35\x8b\xa5\x29\xc4\x8c\x6a\x48\xd6\xfb\x5e\xfa\xb6\x66\xb5\x3e\x58\xd3\xa7\x98\x5c\xa5\x88\xdc\xd7\xff\xdd\xf2\x26\x4e\xf6\x36\x91\xea\x27\x6f\x6e\x2e\x01\x6b\x25\xe9\x0d\xec\x29\xbc\x98\x9d\x41\x0a\x8d\x11\x69\x93\x92\xd7\x54\x44\x5d\xcf\x9b\x0b\xdc\xfa\xd9\x20\x28\x25\x12\x96\x48\x9f\x96\xe6\x6e\x40\x9d\x2c\xda\x5d\x0e\xaf\x43\x08\x69\xf7\x48\x4d\x89\xd1\x05\xbf\xfe\xef\x98\x6a\xda\xf0\x82\x45\x99\x75\xb6\x8b\xd4\xba\x64\x9b\xb2\xf3\xa6\xbd\xee\xe1\x7a\x71\xc3\x8f\xea\x01\x95\x9a\x5d\x5f\xf6\xc1\xae\x13\xb4\x4d\xdb\x58\x49\x8f\x06\x53\x09\x4b\xa6\xb4\x5c\x57\x1c\x10\xce\x85\x29\x08\xe3\x4a\x53\xae\x19\xb2\x6a\xe2\xdf\x9c\x3a\xe3\xfb\x15\xd3\x0d\x61\x80\xef\x8c\xee\x8e\xa6\x5d\xcc\x0c\xb2\xc6\x8f\xf3\x75\x06\xe4\x6f\x95\x3f\x5e\xc9\x2c\xda\xfd\xfd\xc9\x82\x38\x06\x6a\x71\x93\xc6\xb1\x04\xb5\xcd\xd9\x76\x7d\xdd\x0a\x3e\x6f\xac\x1a\x0b\xc1\x53\x6f\xec\x72\xe9\x49\x4a\xb1\xa5\x51\x52\x7c\xfd\x4c\xef\xcb\xa9\x29\x2b\xe6\x57\x37\xb0\xf5\xcc\x42\x5a\x9c\x98\x4c\x7b\x75\x30\x12\x5c\xe5\x69\x69\x45\x88\x21\x03\x1e\x03\x8f\xd6\x58\x32\x2b\xb9\x84\x86\x30\x9e\x1f\x55\x03\x4a\x10\xf2\xbf\xd9\xd2\xa8\xdd\x6e\x72\x55\xc9\xd9\x7b\xa7\x37\x66\xca\x94\x01\xfc\x02\xa4\x51\xfe\x31\x73\xc7\x08\xbd\xbe\x87\x8a\x17\xd2\xd5\xf0\xf2\x41\xa5\x9b\x93\xc5\x42\x8a\xbb\xa7\x7b\x5e\x54\xff\xf4\xfe\x0a\x0f\x53\xcb\x81\x0c\x38\x96\xc2\x7a\xf3\x33\xa1\x98\x2f\x97\x57\x9c\x0b\xb5\x0a\xa2\x62\x61\xeb\x7b\x36\x8f\x55\xcf\xba\xc3\x40\xec\x8d\x45\xa3\xed\x2d\xe7\x76\x33\xa1\x6a\xd1\xf4\xbc\xb0\xa1\x46\xe9\xf9\xf6\x56\x17\x51\x34\x98\xaa\x57\x5f\x5a\x79\x96\x49\xca\x2f\x20\x26\x09\x5c\xb3\x48\x2c\x25\xcd\x56\x2c\xc2\x62\x90\xd6\xd5\x6b\x34\x46\x6d\x43\xa8\x9a\x31\xbc\xe9\xf4\xca\xf2\x79\xc2\xd4\x6a\xb7\xd3\xb0\x95\x38\x14\x44\x12\xf4\x4e\xce\xd7\x87\x36\xce\xec\xe7\xa5\xf0\xe3\x03\xde\x5d\xbf\x2e\x5f\xc4\x62\xbb\x4f\x74\xa5\x51\x64\x08\xdb\x3b\x40\xc1\x49\x82\x15\x22\x6a\xe0\x10\xda\xfb\x99\x4c\x2f\x17\x00\x99\xc5\x67\x0c\x54\x53\x29\xda\x16\x15\xe3\x11\x60\x71\x4b\x57\xa4\x14\xc0\xfb\x00\xb4\x64\x60\x25\x58\x40\xaf\x9f\xdf\x45\xe0\x7a\xb7\xc4\xd9\x6e\x44\x68\x31\x20\xb4\x43\xbc\xe0\x85\x9d\x40\xaf\xf0\x50\x2f\x14\x98\x7f\x1b\xf0\xe2\x93\xa1\x9b\x6d\x6b\x99\x9e\xd9\x50\xf0\xd1\xfc\xf0\xc7\x5a\x2f\x2e\x86\x4b\x91\x95\xb8\x72\x03\x6c\x72\x0c\x67\x95\xf3\x68\x10\x33\x15\x19\x36\xd3\x60\x38\x3a\x16\x5c\xf9\xda\xa5\x94\xdb\x72\xa3\x97\x34\x71\x09\xbb\x6e\xb0\x4c\x24\xe8\x07\x8d\x73\xaf\xaf\xda\xd4\x23\x48\xe7\x10\xc7\x10\xfb\x78\xf7\x35\x69\x38\xf4\x3b\x04\x8e\x2e\x99\xc0\x1f\x8b\xa7\x22\x49\xda\xcf\xf4\x56\xc3\x4a\x1f\xb3\x8a\x07\x40\xef\x38\x93\x0e\x31\xf3\xc4\x03\x94\xa9\x82\x22\x4b\x47\x34\x22\x99\x51\x58\x0a\xb8\xcf\x41\x5f\x01\x70\x12\xad\x20\xba\x50\x65\x8c\x9d\x36\x74\xb8\xb1\xd1\x2e\x86\xaa\x5d\x40\xac\x72\xd0\x42\x30\x35\x1b\xea\x92\xee\x81\x30\xa3\x16\x72\xb8\xda\x8c\xc9\xda\x3e\xb8\xe8\x25\x65\x09\x9d\x27\x1d\x0a
\xf3\xc9\xa2\x7c\x73\x52\x9d\x3f\xf3\xd2\x51\x96\x27\x89\xf3\x4a\x63\x94\x8a\x96\x74\xb1\x60\x11\x06\x29\x62\x94\x4e\x19\xd5\xbb\x73\xe9\xa3\x22\x73\x94\xa6\x3a\xdf\xda\xfa\x16\xbc\x69\xc3\x17\xa3\x85\xb2\x46\x7b\x6b\x1f\x0c\x79\x5f\xd7\x60\xcd\xec\xc0\xaa\xe8\x35\x87\xd6\x8c\xbc\x15\xda\x45\xbb\xbd\x01\xa5\x5c\xa4\x1d\x79\x0f\x54\x09\x5e\x39\x0a\x50\xf3\x90\x6c\xc9\x38\xdd\x5d\x35\xc1\xae\xbf\x6a\x31\x2f\x14\x4d\xba\xc6\xe2\xce\x6c\x29\xa9\x2e\x38\x78\xb9\x44\x77\x68\x3a\xb1\x60\x91\x63\xc4\x1b\x39\xe2\x6b\x44\x1b\x17\x02\xb7\xdb\xa6\xca\xb8\x96\x22\xce\x23\x70\x65\xac\x73\x55\xed\xf8\x56\xcf\x81\x7a\xf8\x97\x1f\xa3\x4c\x27\x88\x41\x53\xe6\xfc\xd5\x82\x03\xa1\x2a\x33\x5a\xbe\xc7\xf6\x5c\x4a\x3c\x51\xfd\x3e\xe0\x61\x77\x74\x7a\x42\xde\x43\x1b\xd2\x75\xf2\x9d\xae\xf8\xbe\x29\x49\xa8\xd2\xe7\x92\x72\x85\x13\x3e\x67\x69\x93\x89\xc2\x68\x56\x88\x01\x8d\xcf\x25\x62\x45\xe3\x63\x8b\x02\x8d\x8f\x1b\xb8\x77\x1f\xce\xb9\xbd\x86\xdb\xb0\xf6\x6f\xf7\x5a\xa6\x47\x1a\xb9\xc4\xdb\x7a\x0a\x8c\x32\x9c\xc4\xbd\x0d\xae\xd4\xb7\xd9\x6a\x87\xfa\x98\xeb\x82\xea\x76\x1b\x03\xc1\x40\x9b\x52\x4b\xbf\xf2\xbe\xc1\x9c\xc7\x20\x13\xf4\xb9\x95\xe3\x45\x2b\x23\x24\xc7\x33\xa7\xfc\xd3\xc2\x8e\x83\x61\xae\xce\xb3\x58\x3a\x0d\x6c\xc0\x9c\xef\xd1\x60\x97\xab\xb1\x6e\xbb\x41\x26\x1b\x45\x90\xe9\x76\x2e\xdb\xcb\x26\xe7\x0d\x2b\x46\x48\x98\xea\x66\xac\x72\x38\x75\x1b\xfb\xe5\xba\xb2\x31\x29\xab\x3c\xa5\x86\x41\xd1\x18\x03\xe4\x8a\x67\x56\x47\xb2\x3a\x8d\xa5\x48\xeb\xb8\xb3\x8e\x17\xbf\x7d\x9d\x3b\xe4\x98\x16\x2d\x52\xed\x3b\xac\x25\xbd\x60\x96\xd2\xeb\xd7\xc0\x97\x7a\x75\x48\xfe\xf8\xd5\x9f\xbe\xfe\x73\xc3\x8b\x62\x6e\x63\xda\x5e\x01\x77\x56\xa2\xdb\x80\xde\x76\xaf\x9b\x66\xce\x99\x0f\xc5\x9e\x2d\xcb\x77\x0a\x87\x42\x89\x95\xe8\x84\x06\xed\xf8\x76\x9e\xb5\x83\xf3\x7b\x4c\x26\x50\x9a\xf2\x08\x26\x46\x1c\xd8\x39\x8c\x51\x65\x2d\x8f\x4c\xd6\xe4\xf9\x57\x13\x0c\xf1\xc4\x49\x59\xea\x9a\x95\x6c\xfd\xc3\xf5\xc7\xd9\x8e\xc5\x30\x45\xfe\x32\xd9\x98\x29\x53\xc4\xec\xbd\x58\x20\x9a\xb6\x4c\x12\xd5\x3e\x09\x96\x67\x7b\x23\xc0\x36\xcf\x86\x62\x25\x5d\x98\xd0\x65\x9b\xec\x67\x97\x4c\x19\x67\x69\x9e\x1e\x92\x67\x0d\xaf\x58\x8e\x7c\x1b\xe8\x61\x7b\x2a\xcf\x33\x6a\xd8\xf2\x52\xd2\x34\xc5\x8c\x2b\x16\x03\xd7\x6c\xc1\x30\xe6\xa5\x20\x31\xd4\xe3\xed\x87\x3e\x4e\xab\x00\x3e\x86\x70\x19\x36\xda\x8b\xe8\x4e\xed\x39\x2e\x51\x28\x75\x5e\xbb\xa8\xca\x79\xd7\x19\x58\xaa\xb4\x6a\x03\x81\xeb\xcc\x4a\x72\x15\xff\x51\x0a\x94\x33\xbe\x54\x65\xb4\x25\xf2\xbf\x36\xf3\xb8\xf9\xec\x6a\x05\x2e\xba\x01\xaa\xde\x41\x5f\xba\xc8\x88\x92\x65\x90\x31\x06\x9e\xb7\xb3\x8f\x6d\x5b\xac\x91\xba\x52\x48\x8e\xa9\x82\x1e\x76\xd7\x4a\x2c\xa6\xbf\x76\xa2\x48\x0e\xbe\x35\x06\xf4\xfc\xd9\x57\xad\x78\x57\xbc\xd7\xf8\x52\x19\xa5\xf9\xe1\x68\xfa\x4f\x3a\xfd\xf5\xe3\x9e\xfb\xc7\xb3\xe9\x5f\xfe\xdf\xe4\xf0\xe3\xd3\xca\x9f\x1f\x9b\x83\x2b\x77\x4b\xce\x65\xab\xe1\xb0\x3b\x6b\xbd\x58\xe5\xf1\x63\xe2\x83\xb9\xce\x65\x0e\x13\xf2\x3d\x4d\x14\x4c\xc8\x8f\x1c\xcf\xc9\x1b\x02\xad\x3d\xba\xc2\x48\x36\x8f\xcd\xa8\x8f\xdb\x5f\xc1\x29\xb5\xbf\xe3\xa6\xdb\xa6\x82\xf6\x03\x92\xb7\x2c\x54\x18\x21\xaf\x60\xa0\xcd\xee\x5a\x08\x31\x83\x6b\x9a\x66\x09\xcc\x22\x91\x1e\x14\xcf\x6f\xf1\x90\x7b\xfe\x75\x0f\xec\xd9\xfb\x60\x71\xe4\xe3\xde\x87\xa9\xfb\xd7\x53\xff\xd3\xfe\x77\x7b\xff\x9a\xb5\x3e\xdf\x7f\x7a\x80\xa1\xbd\x05\xaa\x7d\xfc\x30\x2d\xd1\x6e\xf6\xf1\xe9\xfe\x77\x95\x67\xfb\xbb\x90\x70\x3b\x89\x2a\xa5\xd9\xf4\xa2\xb1\x82\x5d\xa3\x28\xdb\x94\x8d\x95\xd2\x6c\x97\x8a\xb7\x60\xcb\x37\x34\x7b\x0f\x0b\x90\xc0\xa3\x6e\x13\xd2\xf1\xd6\x27\x64\x2f\x36\x47\x38\xe6\xe0\xed\x7b\xe1\x55\x16\x4f\xdd\x41\x56\x7c\xe7\xb
9\x7b\x71\x21\xd4\x66\x4c\x4e\x2d\xd2\x6d\x52\x8a\x92\x3b\x94\xf5\xb2\xd7\xdb\x77\x01\x71\xda\x20\x4e\x4e\xcb\xa8\xbb\x11\x86\x1e\x73\x1c\x59\x83\x57\x9b\x06\xd1\x03\xc9\xfb\xc9\xbe\xbc\x25\xf4\xb7\x73\x90\x62\x9d\xa3\x7b\xf0\xf7\x74\xfc\x64\xed\x2e\xa3\xfb\xc9\x59\xa3\x72\xd9\x57\xba\xf8\xf1\xe4\x85\xc5\x19\x64\x4c\x28\x51\xae\x44\x12\x2b\x92\x73\xf6\x4b\x0e\xe4\xe4\x45\x51\x7d\x88\xf1\x28\xc9\xf1\x5e\xa3\x1f\x7f\x3c\x79\xa1\x66\x84\xfc\x1d\x22\x6a\xf4\xfa\xab\x96\x10\x4e\x2c\xfd\xf7\xee\xed\xeb\xff\x83\x16\x00\xfc\xd2\x5d\x2b\xe2\x0a\x8b\x24\x8c\x5a\x8b\x94\x3d\x7c\x4d\xaf\x36\xba\x11\x67\x14\xd1\xac\xd9\xc6\x40\x9c\xdd\x8e\xdb\x20\xdd\x15\x24\x99\xc2\xe4\x23\xa2\x72\xe9\x56\x63\x06\xb4\x59\x10\x98\x86\xee\x1c\xe7\x3e\x9f\x0a\xf3\xd5\x46\xc5\x45\x47\x82\x73\x88\x30\x32\xc1\x08\xa1\x7d\x38\x44\xf5\xfd\x4d\x01\x7f\xa7\x44\xbb\x19\xe0\x5c\x8e\xe9\xf9\x87\x37\x90\xde\x3e\xa1\x1b\x8a\x7c\xe7\x44\x79\x9c\xf1\x08\xaa\x76\xbe\xc5\xd1\xf8\x6d\xe6\xe0\xe0\x76\xe7\x2c\x61\x6b\xbd\xa3\x46\xb4\x46\x4b\x74\x06\xbf\xef\xb0\x5d\xd7\xc3\x32\xb7\x2c\x1b\x1b\xb9\xad\x68\x40\x2d\xfc\xc9\x2b\xaa\xc8\x1c\x80\xa3\x3d\xd7\xda\xed\x80\x3b\x9c\x87\xd2\xda\x9a\x67\x53\x2d\xa6\x0d\xfa\x55\x07\xe4\xba\xa1\xd6\x62\x3e\xa8\xad\xed\x68\xb0\x41\xe0\x6a\xb5\xde\x05\x03\x55\xde\x6a\xe4\xa5\xa7\xc1\x5e\xea\x66\xad\xac\x36\x67\x67\x64\x2d\x8e\x6d\xfc\x6b\x7b\x4a\x46\xdd\xae\x19\x9e\xb4\x40\xf7\x62\x8d\x9a\x47\xcc\xd1\x6e\xf3\x19\xc8\x4b\xd6\x43\xf8\x78\x5f\x7f\xbf\x17\x6b\x79\xf5\xfe\xf4\x18\xf3\xf3\xcc\x07\xde\x3f\x81\xd8\x5f\x95\x2a\x6e\xdf\xa3\x13\xd9\x50\xa3\xa3\xbb\x27\xe8\x4c\xc8\xf1\x83\x64\x52\x68\x11\x89\x0e\xa7\x53\x6b\xca\x0c\x82\xb6\x2d\xc7\x68\x48\x1f\x43\xe5\x0d\xcb\xc7\x6a\x59\x69\x4a\x0b\x69\xc8\xb5\xf6\x5b\x3e\x2f\xee\x0c\x2b\x7b\x77\x5a\x1f\xf9\xf7\x7f\x1e\xfd\xff\x00\x00\x00\xff\xff\xdb\x03\xf9\x29\xf9\x5a\x01\x00") func operatorsCoreosCom_catalogsourcesYamlBytes() ([]byte, error) { return bindataRead( diff --git a/staging/api/go.mod b/staging/api/go.mod index 09da846392..7d6514019b 100644 --- a/staging/api/go.mod +++ b/staging/api/go.mod @@ -12,11 +12,11 @@ require ( github.com/spf13/cobra v1.9.1 github.com/stretchr/testify v1.10.0 google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb - k8s.io/api v0.32.2 - k8s.io/apiextensions-apiserver v0.32.2 - k8s.io/apimachinery v0.32.2 - k8s.io/client-go v0.32.2 - sigs.k8s.io/controller-runtime v0.20.2 + k8s.io/api v0.32.3 + k8s.io/apiextensions-apiserver v0.32.3 + k8s.io/apimachinery v0.32.3 + k8s.io/client-go v0.32.3 + sigs.k8s.io/controller-runtime v0.20.4 sigs.k8s.io/yaml v1.4.0 ) @@ -69,12 +69,12 @@ require ( golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect golang.org/x/lint v0.0.0-20241112194109-818c5a804067 // indirect golang.org/x/mod v0.22.0 // indirect - golang.org/x/net v0.34.0 // indirect + golang.org/x/net v0.38.0 // indirect golang.org/x/oauth2 v0.27.0 // indirect - golang.org/x/sync v0.10.0 // indirect - golang.org/x/sys v0.29.0 // indirect - golang.org/x/term v0.28.0 // indirect - golang.org/x/text v0.21.0 // indirect + golang.org/x/sync v0.12.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/term v0.30.0 // indirect + golang.org/x/text v0.23.0 // indirect golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.29.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e // indirect @@ -82,8 +82,8 @@ require ( google.golang.org/protobuf v1.36.5 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiserver v0.32.2 // 
indirect - k8s.io/component-base v0.32.2 // indirect + k8s.io/apiserver v0.32.3 // indirect + k8s.io/component-base v0.32.3 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect diff --git a/staging/api/go.sum b/staging/api/go.sum index d38c84f3e9..c5585f0ec6 100644 --- a/staging/api/go.sum +++ b/staging/api/go.sum @@ -179,27 +179,27 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= -golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= -golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= -golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/time v0.7.0 
h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -233,18 +233,18 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.32.2 h1:bZrMLEkgizC24G9eViHGOPbW+aRo9duEISRIJKfdJuw= -k8s.io/api v0.32.2/go.mod h1:hKlhk4x1sJyYnHENsrdCWw31FEmCijNGPJO5WzHiJ6Y= -k8s.io/apiextensions-apiserver v0.32.2 h1:2YMk285jWMk2188V2AERy5yDwBYrjgWYggscghPCvV4= -k8s.io/apiextensions-apiserver v0.32.2/go.mod h1:GPwf8sph7YlJT3H6aKUWtd0E+oyShk/YHWQHf/OOgCA= -k8s.io/apimachinery v0.32.2 h1:yoQBR9ZGkA6Rgmhbp/yuT9/g+4lxtsGYwW6dR6BDPLQ= -k8s.io/apimachinery v0.32.2/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/apiserver v0.32.2 h1:WzyxAu4mvLkQxwD9hGa4ZfExo3yZZaYzoYvvVDlM6vw= -k8s.io/apiserver v0.32.2/go.mod h1:PEwREHiHNU2oFdte7BjzA1ZyjWjuckORLIK/wLV5goM= -k8s.io/client-go v0.32.2 h1:4dYCD4Nz+9RApM2b/3BtVvBHw54QjMFUl1OLcJG5yOA= -k8s.io/client-go v0.32.2/go.mod h1:fpZ4oJXclZ3r2nDOv+Ux3XcJutfrwjKTCHz2H3sww94= -k8s.io/component-base v0.32.2 h1:1aUL5Vdmu7qNo4ZsE+569PV5zFatM9hl+lb3dEea2zU= -k8s.io/component-base v0.32.2/go.mod h1:PXJ61Vx9Lg+P5mS8TLd7bCIr+eMJRQTyXe8KvkrvJq0= +k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= +k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= +k8s.io/apiextensions-apiserver v0.32.3 h1:4D8vy+9GWerlErCwVIbcQjsWunF9SUGNu7O7hiQTyPY= +k8s.io/apiextensions-apiserver v0.32.3/go.mod h1:8YwcvVRMVzw0r1Stc7XfGAzB/SIVLunqApySV5V7Dss= +k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= +k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/apiserver v0.32.3 h1:kOw2KBuHOA+wetX1MkmrxgBr648ksz653j26ESuWNY8= +k8s.io/apiserver v0.32.3/go.mod h1:q1x9B8E/WzShF49wh3ADOh6muSfpmFL0I2t+TG0Zdgc= +k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= +k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= +k8s.io/component-base v0.32.3 h1:98WJvvMs3QZ2LYHBzvltFSeJjEx7t5+8s71P7M74u8k= +k8s.io/component-base v0.32.3/go.mod h1:LWi9cR+yPAv7cu2X9rZanTiFKB2kHA+JjmhkKjCZRpI= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= @@ -253,8 +253,8 @@ k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6J k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= -sigs.k8s.io/controller-runtime v0.20.2 h1:/439OZVxoEc02psi1h4QO3bHzTgu49bb347Xp4gW1pc= -sigs.k8s.io/controller-runtime v0.20.2/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= +sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= +sigs.k8s.io/controller-runtime v0.20.4/go.mod 
h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= diff --git a/staging/api/pkg/operators/v1alpha1/catalogsource_types.go b/staging/api/pkg/operators/v1alpha1/catalogsource_types.go index 764f176e17..328ba7c08d 100644 --- a/staging/api/pkg/operators/v1alpha1/catalogsource_types.go +++ b/staging/api/pkg/operators/v1alpha1/catalogsource_types.go @@ -168,8 +168,8 @@ type GrpcPodConfig struct { // ExtractContentConfig configures context extraction from a file-based catalog index image. type ExtractContentConfig struct { - // CacheDir is the directory storing the pre-calculated API cache. - CacheDir string `json:"cacheDir"` + // CacheDir is the (optional) directory storing the pre-calculated API cache. + CacheDir string `json:"cacheDir,omitempty"` // CatalogDir is the directory storing the file-based catalog contents. CatalogDir string `json:"catalogDir"` } diff --git a/staging/operator-lifecycle-manager/.github/workflows/go-verdiff.yaml b/staging/operator-lifecycle-manager/.github/workflows/go-verdiff.yaml new file mode 100644 index 0000000000..0ceeddc9d1 --- /dev/null +++ b/staging/operator-lifecycle-manager/.github/workflows/go-verdiff.yaml @@ -0,0 +1,17 @@ +name: go-verdiff +on: + pull_request: + paths: + - '**.mod' + - '.github/workflows/go-verdiff.yaml' + branches: + - master +jobs: + go-verdiff: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Check golang version + run: hack/tools/check-go-version.sh "${{ github.event.pull_request.base.sha }}" diff --git a/staging/operator-lifecycle-manager/deploy/chart/crds/0000_50_olm_00-catalogsources.crd.yaml b/staging/operator-lifecycle-manager/deploy/chart/crds/0000_50_olm_00-catalogsources.crd.yaml index 6d892369c1..c8a0bf8116 100644 --- a/staging/operator-lifecycle-manager/deploy/chart/crds/0000_50_olm_00-catalogsources.crd.yaml +++ b/staging/operator-lifecycle-manager/deploy/chart/crds/0000_50_olm_00-catalogsources.crd.yaml @@ -975,11 +975,10 @@ spec: configured to use *must* be using the file-based catalogs in order to utilize this feature. type: object required: - - cacheDir - catalogDir properties: cacheDir: - description: CacheDir is the directory storing the pre-calculated API cache. + description: CacheDir is the (optional) directory storing the pre-calculated API cache. type: string catalogDir: description: CatalogDir is the directory storing the file-based catalog contents. 
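With cacheDir now optional in both the Go type and the CRD schema, an extractContent stanza can omit it entirely: the `omitempty` tag drops an unset CacheDir from the serialized object rather than emitting an empty string, which is what allows the CRD above to stop listing cacheDir under `required`. A minimal sketch of that behavior (the local struct below merely mirrors the two fields from the hunk above rather than importing the real operator-framework/api package, and the directory paths are made-up examples):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local mirror of ExtractContentConfig as changed above; redeclared here so
// the sketch is self-contained instead of importing operator-framework/api.
type extractContentConfig struct {
	CacheDir   string `json:"cacheDir,omitempty"`
	CatalogDir string `json:"catalogDir"`
}

func main() {
	// With omitempty, leaving CacheDir unset removes the key from the output
	// entirely, instead of serializing it as "cacheDir":"".
	noCache, _ := json.Marshal(extractContentConfig{CatalogDir: "/configs"})
	fmt.Println(string(noCache)) // {"catalogDir":"/configs"}

	// A populated CacheDir still serializes exactly as before the change.
	withCache, _ := json.Marshal(extractContentConfig{
		CacheDir:   "/tmp/cache",
		CatalogDir: "/configs",
	})
	fmt.Println(string(withCache)) // {"cacheDir":"/tmp/cache","catalogDir":"/configs"}
}
```

This pairs with the CRD hunk above, which removes cacheDir from the required list while leaving catalogDir mandatory.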
diff --git a/staging/operator-lifecycle-manager/go.mod b/staging/operator-lifecycle-manager/go.mod index 48c93884d3..84be10473e 100644 --- a/staging/operator-lifecycle-manager/go.mod +++ b/staging/operator-lifecycle-manager/go.mod @@ -1,16 +1,14 @@ module github.com/operator-framework/operator-lifecycle-manager -go 1.23.0 - -toolchain go1.23.4 +go 1.23.6 require ( github.com/blang/semver/v4 v4.0.0 - github.com/containers/image/v5 v5.34.2 + github.com/containers/image/v5 v5.35.0 github.com/coreos/go-semver v0.3.1 github.com/distribution/reference v0.6.0 github.com/evanphx/json-patch v5.9.11+incompatible - github.com/fsnotify/fsnotify v1.8.0 + github.com/fsnotify/fsnotify v1.9.0 github.com/ghodss/yaml v1.0.0 github.com/go-air/gini v1.0.4 github.com/go-logr/logr v1.4.2 @@ -20,25 +18,25 @@ require ( github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 github.com/mitchellh/hashstructure v1.1.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/onsi/ginkgo/v2 v2.23.3 - github.com/onsi/gomega v1.36.2 + github.com/onsi/ginkgo/v2 v2.23.4 + github.com/onsi/gomega v1.37.0 github.com/openshift/api v3.9.0+incompatible github.com/openshift/client-go v0.0.0-20220525160904-9e1acff93e4a - github.com/operator-framework/api v0.30.0 - github.com/operator-framework/operator-registry v1.51.0 + github.com/operator-framework/api v0.31.0 + github.com/operator-framework/operator-registry v1.53.0 github.com/otiai10/copy v1.14.1 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.21.1 - github.com/prometheus/client_model v0.6.1 + github.com/prometheus/client_model v0.6.2 github.com/prometheus/common v0.63.0 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.9.1 github.com/spf13/pflag v1.0.6 github.com/stretchr/testify v1.10.0 - golang.org/x/net v0.37.0 - golang.org/x/sync v0.12.0 + golang.org/x/net v0.39.0 + golang.org/x/sync v0.13.0 golang.org/x/time v0.11.0 - google.golang.org/grpc v1.70.0 + google.golang.org/grpc v1.72.0 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.32.3 k8s.io/apiextensions-apiserver v0.32.3 @@ -57,9 +55,9 @@ require ( ) require ( - cel.dev/expr v0.19.0 // indirect + cel.dev/expr v0.20.0 // indirect github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect - github.com/BurntSushi/toml v1.4.0 // indirect + github.com/BurntSushi/toml v1.5.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/Microsoft/hcsshim v0.12.9 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect @@ -68,7 +66,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/containerd/cgroups/v3 v3.0.3 // indirect + github.com/containerd/cgroups/v3 v3.0.5 // indirect github.com/containerd/containerd v1.7.27 // indirect github.com/containerd/containerd/api v1.8.0 // indirect github.com/containerd/continuity v0.4.4 // indirect @@ -78,16 +76,16 @@ require ( github.com/containerd/platforms v0.2.1 // indirect github.com/containerd/ttrpc v1.2.7 // indirect github.com/containerd/typeurl/v2 v2.2.3 // indirect - github.com/containers/common v0.62.0 // indirect + github.com/containers/common v0.63.0 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect github.com/containers/ocicrypt v1.2.1 // indirect - github.com/containers/storage v1.57.2 // indirect + github.com/containers/storage v1.58.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect 
- github.com/docker/cli v28.0.0+incompatible // indirect + github.com/docker/cli v28.1.1+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v27.5.1+incompatible // indirect - github.com/docker/docker-credential-helpers v0.8.2 // indirect + github.com/docker/docker v28.0.4+incompatible // indirect + github.com/docker/docker-credential-helpers v0.9.3 // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.2 // indirect @@ -99,46 +97,46 @@ require ( github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect - github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-openapi/swag v0.23.1 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gobuffalo/flect v1.0.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-migrate/migrate/v4 v4.18.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/cel-go v0.22.1 // indirect + github.com/google/cel-go v0.23.0 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect + github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect github.com/h2non/filetype v1.1.3 // indirect github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/itchyny/timefmt-go v0.1.6 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.11 // indirect + github.com/klauspost/compress v1.18.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect - github.com/mailru/easyjson v0.7.7 // indirect + github.com/mailru/easyjson v0.9.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-sqlite3 v1.14.24 // indirect + github.com/mattn/go-sqlite3 v1.14.28 // indirect github.com/moby/locker v1.0.1 // indirect github.com/moby/sys/capability v0.4.0 // indirect github.com/moby/sys/mountinfo v0.7.2 // indirect github.com/moby/sys/sequential v0.5.0 // indirect - github.com/moby/sys/user v0.3.0 // indirect + github.com/moby/sys/user v0.4.0 // indirect github.com/moby/sys/userns v0.1.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0 // indirect - github.com/opencontainers/runtime-spec v1.2.0 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect + github.com/opencontainers/runtime-spec v1.2.1 // indirect github.com/otiai10/mint v1.6.3 // indirect 
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/procfs v0.15.1 // indirect @@ -149,30 +147,32 @@ require ( go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect go.etcd.io/etcd/client/v3 v3.5.16 // indirect go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 // indirect - go.opentelemetry.io/otel v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect + go.opentelemetry.io/otel v1.34.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect - go.opentelemetry.io/otel/metric v1.32.0 // indirect - go.opentelemetry.io/otel/sdk v1.32.0 // indirect - go.opentelemetry.io/otel/trace v1.32.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.opentelemetry.io/otel/metric v1.34.0 // indirect + go.opentelemetry.io/otel/sdk v1.34.0 // indirect + go.opentelemetry.io/otel/trace v1.34.0 // indirect + go.opentelemetry.io/proto/otlp v1.4.0 // indirect + go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.36.0 // indirect + golang.org/x/crypto v0.37.0 // indirect golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 // indirect - golang.org/x/mod v0.23.0 // indirect - golang.org/x/oauth2 v0.28.0 // indirect - golang.org/x/sys v0.31.0 // indirect - golang.org/x/term v0.30.0 // indirect - golang.org/x/text v0.23.0 // indirect - golang.org/x/tools v0.30.0 // indirect + golang.org/x/mod v0.24.0 // indirect + golang.org/x/oauth2 v0.29.0 // indirect + golang.org/x/sys v0.32.0 // indirect + golang.org/x/term v0.31.0 // indirect + golang.org/x/text v0.24.0 // indirect + golang.org/x/tools v0.31.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e // indirect - google.golang.org/protobuf v1.36.5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f // indirect + google.golang.org/protobuf v1.36.6 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect @@ -189,6 +189,11 @@ require ( // issue: https://github.com/operator-framework/operator-lifecycle-manager/issues/3284 replace google.golang.org/grpc => google.golang.org/grpc v1.63.2 +// The cel-go v0.23.0 upgrade raises errors from the vendored sources that point to +// incompatibilities.
After upgrading to the latest versions of k8s/api (v0.33+), +// we should check whether this can be fixed and remove this replace directive. +replace github.com/google/cel-go => github.com/google/cel-go v0.22.1 + replace ( // controller runtime github.com/openshift/api => github.com/openshift/api v0.0.0-20221021112143-4226c2167e40 // release-4.12 diff --git a/staging/operator-lifecycle-manager/go.sum b/staging/operator-lifecycle-manager/go.sum index 9e7e045807..09dd6bfd95 100644 --- a/staging/operator-lifecycle-manager/go.sum +++ b/staging/operator-lifecycle-manager/go.sum @@ -1,5 +1,5 @@ -cel.dev/expr v0.19.0 h1:lXuo+nDhpyJSpWxpPVi5cPUwzKb+dsdOiw6IreM5yt0= -cel.dev/expr v0.19.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.20.0 h1:OunBvVCfvpWlt4dN7zg3FM6TDkzOePe1+foGJ9AXeeI= +cel.dev/expr v0.20.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= @@ -1313,8 +1313,8 @@ git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3p github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= -github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= +github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= @@ -1323,6 +1323,10 @@ github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6 github.com/Microsoft/hcsshim v0.12.9/go.mod h1:fJ0gkFAna6ukt0bLdKB8djt4XIJhF/vEPuoIWYVvZ8Y= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= +github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= @@ -1354,8 +1358,6 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod
h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= -github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -1383,8 +1385,8 @@ github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= -github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= -github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= +github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo= +github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= github.com/containerd/containerd v1.7.27 h1:yFyEyojddO3MIGVER2xJLWoCIn+Up4GaHFquP7hsFII= github.com/containerd/containerd v1.7.27/go.mod h1:xZmPnl75Vc+BLGt4MIfu6bp+fy03gdHAn9bz+FreFR0= github.com/containerd/containerd/api v1.8.0 h1:hVTNJKR8fMc/2Tiw60ZRijntNMd1U+JVMyTRdsD2bS0= @@ -1403,22 +1405,24 @@ github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRq github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= -github.com/containers/common v0.62.0 h1:Sl9WE5h7Y/F3bejrMAA4teP1EcY9ygqJmW4iwSloZ10= -github.com/containers/common v0.62.0/go.mod h1:Yec+z8mrSq4rydHofrnDCBqAcNA/BGrSg1kfFUL6F6s= -github.com/containers/image/v5 v5.34.2 h1:3r1etun4uJYq5197tcymUcI1h6+zyzKS9PtRtBlEKMI= -github.com/containers/image/v5 v5.34.2/go.mod h1:MG++slvQSZVq5ejAcLdu4APGsKGMb0YHHnAo7X28fdE= +github.com/containers/common v0.63.0 h1:ox6vgUYX5TSvt4W+bE36sYBVz/aXMAfRGVAgvknSjBg= +github.com/containers/common v0.63.0/go.mod h1:+3GCotSqNdIqM3sPs152VvW7m5+Mg8Kk+PExT3G9hZw= +github.com/containers/image/v5 v5.35.0 h1:T1OeyWp3GjObt47bchwD9cqiaAm/u4O4R9hIWdrdrP8= +github.com/containers/image/v5 v5.35.0/go.mod h1:8vTsgb+1gKcBL7cnjyNOInhJQfTUQjJoO2WWkKDoebM= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM= github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ= -github.com/containers/storage v1.57.2 h1:2roCtTyE9pzIaBDHibK72DTnYkPmwWaq5uXxZdaWK4U= -github.com/containers/storage v1.57.2/go.mod h1:i/Hb4lu7YgFr9G0K6BMjqW0BLJO1sFsnWQwj2UoWCUM= +github.com/containers/storage 
v1.58.0 h1:Q7SyyCCjqgT3wYNgRNIL8o/wUS92heIj2/cc8Sewvcc= +github.com/containers/storage v1.58.0/go.mod h1:w7Jl6oG+OpeLGLzlLyOZPkmUso40kjpzgrHUk5tyBlo= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -1429,14 +1433,14 @@ github.com/distribution/distribution/v3 v3.0.0-rc.3 h1:JRJso9IVLoooKX76oWR+DWCCd github.com/distribution/distribution/v3 v3.0.0-rc.3/go.mod h1:offoOgrnYs+CFwis8nE0hyzYZqRCZj5EFc5kgfszwiE= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v28.0.0+incompatible h1:ido37VmLUqEp+5NFb9icd6BuBB+SNDgCn+5kPCr2buA= -github.com/docker/cli v28.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v28.1.1+incompatible h1:eyUemzeI45DY7eDPuwUcmDyDj1pM98oD5MdSpiItp8k= +github.com/docker/cli v28.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8= -github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= -github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= +github.com/docker/docker v28.0.4+incompatible h1:JNNkBctYKurkw6FrHfKqY0nKIDf5nrbxjVBtS+cdcok= +github.com/docker/docker v28.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8= +github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= @@ -1478,8 +1482,8 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fogleman/gg 
v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= -github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= @@ -1500,6 +1504,8 @@ github.com/go-git/go-git/v5 v5.13.1/go.mod h1:qryJB4cSBoq3FRoBRf5A77joojuBcmPJ0q github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= +github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA= github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= @@ -1513,12 +1519,26 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= +github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= +github.com/go-openapi/errors v0.22.1 h1:kslMRRnK7NCb/CvR1q1VWuEQCEIsBGn5GgKD9e+HYhU= +github.com/go-openapi/errors v0.22.1/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= -github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= -github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= +github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= +github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ= +github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= 
+github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= +github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= +github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= +github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= +github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= +github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= @@ -1548,8 +1568,9 @@ github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwm github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= @@ -1609,6 +1630,8 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-containerregistry v0.20.3 h1:oNx7IdTI936V8CQRveCjaxOiegWwvM7kqkbXTpyiovI= +github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI= github.com/google/go-pkcs11 v0.2.0/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= github.com/google/go-pkcs11 v0.2.1-0.20230907215043-c6f79328ddf9/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -1635,8 +1658,8 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= -github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= -github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof 
v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM= github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= @@ -1689,8 +1712,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ= github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg= github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY= github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c h1:fEE5/5VNnYUoBOj2I9TP8Jc+a7lge3QWn9DKE7NCwfc= @@ -1738,8 +1761,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= @@ -1758,14 +1781,16 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec h1:2tTW6cDth2TSgRbAhD7yjZzTQmcN25sDRPEeinR51yQ= +github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec/go.mod h1:TmwEoGCwIti7BCeJ9hescZgRtatxRE+A72pCoPfmcfk= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= 
github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= github.com/lyft/protoc-gen-star/v2 v2.0.3/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= @@ -1777,13 +1802,17 @@ github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= -github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM= -github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-sqlite3 v1.14.28 h1:ThEiQrnbtumT+QMknw63Befp/ce/nUPgBPMlRFEum7A= +github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 h1:yVCLo4+ACVroOEr4iFU1iH46Ldlzz2rTuu18Ra7M8sU= github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2/go.mod h1:VzB2VoMh1Y32/QqDfg9ZJYHj99oM4LiGtqPZydTiQSQ= +github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= +github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= github.com/mitchellh/hashstructure v1.1.0 h1:P6P1hdjqAAknpY/M1CGipelZgp+4y9ja9kmUZPXP+H0= @@ -1798,8 +1827,8 @@ github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9Kou github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= -github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= -github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= +github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= github.com/moby/sys/userns v0.1.0/go.mod 
h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1811,32 +1840,32 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0= -github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= -github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= -github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= +github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= +github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= +github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= +github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= -github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= -github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= -github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww= +github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/openshift/api v0.0.0-20221021112143-4226c2167e40 h1:PxjGCA72RtsdHWToZLkjjeWm7WXXx4cuv0u4gtvLbrk= github.com/openshift/api v0.0.0-20221021112143-4226c2167e40/go.mod h1:aQ6LDasvHMvHZXqLHnX2GRmnfTWCF/iIwz8EMTTIE9A= github.com/openshift/client-go v0.0.0-20221019143426-16aed247da5c h1:CV76yFOTXmq9VciBR3Bve5ZWzSxdft7gaMVB3kS0rwg= github.com/openshift/client-go v0.0.0-20221019143426-16aed247da5c/go.mod h1:lFMO8mLHXWFzSdYvGNo8ivF9SfF6zInA8ZGw4phRnUE= -github.com/operator-framework/api v0.30.0 h1:44hCmGnEnZk/Miol5o44dhSldNH0EToQUG7vZTl29kk= -github.com/operator-framework/api v0.30.0/go.mod h1:FYxAPhjtlXSAty/fbn5YJnFagt6SpJZJgFNNbvDe5W0= -github.com/operator-framework/operator-registry v1.51.0 h1:3T1H2W0wYvJx82x+Ue6nooFsn859ceJf1yH6MdRdjMQ= -github.com/operator-framework/operator-registry v1.51.0/go.mod h1:dJadFTSvsgpeiqhTMK7+zXrhU0LIlx4Y/aDz0efq5oQ= +github.com/operator-framework/api v0.31.0 h1:tRsFTuZ51xD8U5QgiPo3+mZgVipHZVgRXYrI6RRXOh8= +github.com/operator-framework/api v0.31.0/go.mod 
h1:57oCiHNeWcxmzu1Se8qlnwEKr/GGXnuHvspIYFCcXmY= +github.com/operator-framework/operator-registry v1.53.0 h1:TjJvNxWgd0R+Xv+2ZTluL3Ik0VDv2aHhO/aAPPg5vtA= +github.com/operator-framework/operator-registry v1.53.0/go.mod h1:ypPDmJOl3x6jo4qEbi0/PNri82u0JIzWlPtIwHlyTSE= github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8= github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I= github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs= github.com/otiai10/mint v1.6.3/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= -github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc= -github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= @@ -1851,6 +1880,10 @@ github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qR github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= +github.com/proglottis/gpgme v0.1.4 h1:3nE7YNA70o2aLjcg63tXMOhPD7bplfE5CBdV+hLAm2M= +github.com/proglottis/gpgme v0.1.4/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glEEZ7mRKrM= github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk= github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1858,8 +1891,8 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k= github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= @@ -1868,10 +1901,12 @@ github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 h1:EaDatTxkdHG+U3Bk4EUr+DZ7fO github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJuQe5bzQ02jGd5Qcbgb97Flm7U= github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc= 
github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ= -github.com/redis/go-redis/v9 v9.1.0 h1:137FnGdk+EQdCbye1FW+qOEcY5S+SpY9T0NiuqvtfMY= -github.com/redis/go-redis/v9 v9.1.0/go.mod h1:urWj3He21Dj5k4TK1y59xH8Uj6ATueP8AH1cY3lZl4c= +github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM= +github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= @@ -1883,8 +1918,20 @@ github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfF github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8= github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM= +github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc= +github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw= +github.com/sigstore/fulcio v1.6.6 h1:XaMYX6TNT+8n7Npe8D94nyZ7/ERjEsNGFC+REdi/wzw= +github.com/sigstore/fulcio v1.6.6/go.mod h1:BhQ22lwaebDgIxVBEYOOqLRcN5+xOV+C9bh/GUXRhOk= +github.com/sigstore/protobuf-specs v0.4.1 h1:5SsMqZbdkcO/DNHudaxuCUEjj6x29tS2Xby1BxGU7Zc= +github.com/sigstore/protobuf-specs v0.4.1/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= +github.com/sigstore/rekor v1.3.10 h1:/mSvRo4MZ/59ECIlARhyykAlQlkmeAQpvBPlmJtZOCU= +github.com/sigstore/rekor v1.3.10/go.mod h1:JvryKJ40O0XA48MdzYUPu0y4fyvqt0C4iSY7ri9iu3A= +github.com/sigstore/sigstore v1.9.3 h1:y2qlTj+vh+Or3ictKuR3JUFawZPdDxAjrWkeFhon0OQ= +github.com/sigstore/sigstore v1.9.3/go.mod h1:VwYkiw0G0dRtwL25KSs04hCyVFF6CYMd/qvNeYrl7EQ= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/smallstep/pkcs7 v0.1.1 h1:x+rPdt2W088V9Vkjho4KtoggyktZJlMduZAtRHm68LU= +github.com/smallstep/pkcs7 v0.1.1/go.mod h1:dL6j5AIz9GHjVEBTXtW+QliALcgM19RtXaTeyxI+AfA= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= @@ -1895,6 +1942,8 @@ github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw= 
+github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M= github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -1916,12 +1965,16 @@ github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf github.com/substrait-io/substrait-go v0.4.2/go.mod h1:qhpnLmrcvAnlZsUyPXZRqldiHapPTXC3t7xFgDi3aQg= github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI= github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/vbatts/tar-split v0.11.7 h1:ixZ93pO/GmvaZw4Vq9OwmfZK/kc2zKdPfu0B+gYqs3U= -github.com/vbatts/tar-split v0.11.7/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= +github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/vbauerster/mpb/v8 v8.9.3 h1:PnMeF+sMvYv9u23l6DO6Q3+Mdj408mjLRXIzmUmU2Z8= +github.com/vbauerster/mpb/v8 v8.9.3/go.mod h1:hxS8Hz4C6ijnppDSIX6LjG8FYJSoPo9iIOcE53Zik0c= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk= @@ -1952,6 +2005,8 @@ go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk= go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI= go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE= go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s= +go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= +go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -1961,6 +2016,8 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/contrib/bridges/prometheus 
v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w= go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk= go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4= @@ -1971,13 +2028,13 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.5 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I= go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8= @@ -1986,12 +2043,12 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7Z go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw= 
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0= go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU= go.opentelemetry.io/otel/exporters/prometheus v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU= go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 h1:CHXNXwfKWfzS65yrlB2PVds1IBZcdsX8Vepy9of0iRU= @@ -2005,29 +2062,31 @@ go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= -go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs= go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo= -go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= -go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= +go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= +go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v1.0.0/go.mod 
h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= +go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -2057,8 +2116,8 @@ golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= -golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= +golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2124,8 +2183,8 @@ golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= -golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2193,8 +2252,8 @@ golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= -golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= +golang.org/x/net v0.39.0/go.mod 
h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2228,8 +2287,8 @@ golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= -golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc= -golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98= +golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2250,8 +2309,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= -golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= +golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2341,8 +2400,8 @@ golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2360,8 +2419,8 @@ golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.16.0/go.mod 
h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= -golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= +golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2381,8 +2440,8 @@ golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= -golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2460,8 +2519,8 @@ golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= -golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= -golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= +golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= +golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2717,8 +2776,8 @@ google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:cc8bqMqt google.golang.org/genproto v0.0.0-20240205150955-31a09d347014/go.mod h1:xEgQu1e4stdSSsxPDK8Azkrk/ECl5HvdPf6nbZrTS5M= google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= -google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= -google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= +google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb h1:ITgPrl429bc6+2ZraNSzMDk3I95nmQln2fuPstKwFDE= +google.golang.org/genproto 
v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:sAo5UzpjUwgFBCzupwhcLcxHVDK7vG5IqI30YnwX2eE= google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= @@ -2781,8 +2840,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe/go. google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014/go.mod h1:SaPjaZGWb0lPqs6Ittu0spdfrOArqji4ZdeP5IC/9N4= google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:YUWgXUFRPfoYK1IHMuxH5K6nPEXSCzIMljnQ59lLRCk= google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e h1:YA5lmSs3zc/5w+xsRcHqpETkaYyK63ivEPzNTcUUlSA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f h1:N/PrbTw4kdkqNRzVfWPrBekzLuarFREcbFOiOLkXon4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= @@ -2806,8 +2865,8 @@ google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= -google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -2830,9 +2889,9 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= -gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod 
h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2929,6 +2988,8 @@ modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= modernc.org/z v1.7.0/go.mod h1:hVdgNMh8ggTuRG1rGU8x+xGRFfiQUIAw0ZqlPy8+HyQ= +oras.land/oras-go/v2 v2.5.0 h1:o8Me9kLY74Vp5uw07QXPiitjsw7qNXi8Twd+19Zf02c= +oras.land/oras-go/v2 v2.5.0/go.mod h1:z4eisnLP530vwIOUOJeBIj0aGI0L1C3d53atvCBqZHg= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= diff --git a/staging/operator-lifecycle-manager/hack/tools/check-go-version.sh b/staging/operator-lifecycle-manager/hack/tools/check-go-version.sh new file mode 100755 index 0000000000..f9f2d4fe21 --- /dev/null +++ b/staging/operator-lifecycle-manager/hack/tools/check-go-version.sh @@ -0,0 +1,98 @@ +#!/bin/bash + +########################################### +# Check Go version in go.mod files +# and ensure it is not greater than the +# version in the main go.mod file. +# Also check if the version in the main +# go.mod file is updated in the +# submodules. +# This script is intended to be run +# as part of the CI pipeline to ensure +# that the version of Go that we can use +# is not accidentally upgraded. +# Source: https://github.com/operator-framework/operator-controller/blob/main/hack/tools/check-go-version.sh +# +# PS: We have the intention to centralize +# this implementation in the future. +########################################### + + +BASE_REF=${1:-main} +GO_VER=$(sed -En 's/^go (.*)$/\1/p' "go.mod") +OLDIFS="${IFS}" +IFS='.' MAX_VER=(${GO_VER}) +IFS="${OLDIFS}" + +if [ ${#MAX_VER[*]} -ne 3 -a ${#MAX_VER[*]} -ne 2 ]; then + echo "Invalid go version: ${GO_VER}" + exit 1 +fi + +GO_MAJOR=${MAX_VER[0]} +GO_MINOR=${MAX_VER[1]} +GO_PATCH=${MAX_VER[2]} + +RETCODE=0 + +check_version () { + local whole=$1 + local file=$2 + OLDIFS="${IFS}" + IFS='.' ver=(${whole}) + IFS="${OLDIFS}" + + if [ ${ver[0]} -gt ${GO_MAJOR} ]; then + echo "${file}: ${whole}: Bad golang version (expected ${GO_VER} or less)" + return 1 + fi + if [ ${ver[1]} -gt ${GO_MINOR} ]; then + echo "${file}: ${whole}: Bad golang version (expected ${GO_VER} or less)" + return 1 + fi + + if [ ${#ver[*]} -eq 2 ] ; then + return 0 + fi + if [ ${#ver[*]} -ne 3 ] ; then + echo "${file}: ${whole}: Badly formatted golang version" + return 1 + fi + + if [ ${ver[1]} -eq ${GO_MINOR} -a ${ver[2]} -gt ${GO_PATCH} ]; then + echo "${file}: ${whole}: Bad golang version (expected ${GO_VER} or less)" + return 1 + fi + return 0 +} + +echo "Found golang version: ${GO_VER}" + +for f in $(find . -name "*.mod"); do + v=$(sed -En 's/^go (.*)$/\1/p' ${f}) + if [ -z ${v} ]; then + echo "${f}: Skipping, no version found" + continue + fi + if ! 
check_version ${v} ${f}; then + RETCODE=1 + fi + old=$(git grep -ohP '^go .*$' "${BASE_REF}" -- "${f}") + old=${old#go } + new=$(git grep -ohP '^go .*$' "${f}") + new=${new#go } + # If ${old} is empty, it means this is a new .mod file + if [ -z "${old}" ]; then + continue + fi + # Check if patch version remains 0: X.x.0 <-> X.x + if [ "${new}.0" == "${old}" -o "${new}" == "${old}.0" ]; then + continue + fi + if [ "${new}" != "${old}" ]; then + echo "${f}: ${v}: Updated golang version from ${old}" + RETCODE=1 + fi +done + +exit ${RETCODE} diff --git a/staging/operator-lifecycle-manager/pkg/controller/operators/labeller/filters.go b/staging/operator-lifecycle-manager/pkg/controller/operators/labeller/filters.go index 333bc2b9ea..c07545ae99 100644 --- a/staging/operator-lifecycle-manager/pkg/controller/operators/labeller/filters.go +++ b/staging/operator-lifecycle-manager/pkg/controller/operators/labeller/filters.go @@ -7,6 +7,7 @@ import ( "sync" operators "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/install" "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/reconciler" "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" @@ -177,7 +178,11 @@ func Validate(ctx context.Context, logger *logrus.Logger, metadataClient metadat logger.WithFields(logrus.Fields{ "gvr": gvr.String(), "nonconforming": count, - }).Info("found nonconforming items") + }).Errorf( + "found nonconforming items: missing the required label %q (metadata.labels[\"%s\"]=\"true\").", + install.OLMManagedLabelKey, + install.OLMManagedLabelKey, + ) } okLock.Lock() ok = ok && count == 0 diff --git a/staging/operator-registry/.github/dependabot.yml b/staging/operator-registry/.github/dependabot.yml index a0efd7fc13..025457d216 100644 --- a/staging/operator-registry/.github/dependabot.yml +++ b/staging/operator-registry/.github/dependabot.yml @@ -8,6 +8,14 @@ updates: directory: "/" schedule: interval: "daily" + groups: + k8s-dependencies: + patterns: + - "k8s.io/*" + - "sigs.k8s.io/*" + golang-x-deps: + patterns: + - "golang.org/x/*" - package-ecosystem: "docker" directory: "/" schedule: diff --git a/staging/operator-registry/.github/workflows/go-verdiff.yaml b/staging/operator-registry/.github/workflows/go-verdiff.yaml new file mode 100644 index 0000000000..da51f5c2ae --- /dev/null +++ b/staging/operator-registry/.github/workflows/go-verdiff.yaml @@ -0,0 +1,17 @@ +name: go-verdiff +on: + pull_request: + paths: + - '**.mod' + - '.github/workflows/go-verdiff.yaml' + branches: + - main +jobs: + go-verdiff: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Check golang version + run: hack/tools/check-go-version.sh "${{ github.event.pull_request.base.sha }}" diff --git a/staging/operator-registry/.github/workflows/test.yml b/staging/operator-registry/.github/workflows/test.yml index a1b6559d61..a353509cf7 100644 --- a/staging/operator-registry/.github/workflows/test.yml +++ b/staging/operator-registry/.github/workflows/test.yml @@ -11,7 +11,7 @@ on: jobs: e2e: - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 with: go-version-file: 'go.mod' - name: Install podman run: | - . 
/etc/os-release - echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/ /" | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list - curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/Release.key | sudo apt-key add - sudo apt-get update - sudo apt-get -y install conntrack podman + sudo apt-get -y install podman podman version - name: Create kind cluster and setup local docker registry run: | diff --git a/staging/operator-registry/Makefile b/staging/operator-registry/Makefile index c2992c598f..6ac2911caa 100644 --- a/staging/operator-registry/Makefile +++ b/staging/operator-registry/Makefile @@ -26,16 +26,14 @@ $(PROTOC): null := space := $(null) # comma := , -# default to json1 for sqlite3 -TAGS := -tags=json1 +# default to json1 for sqlite3 and containers_image_openpgp for containers/image +TAGS := json1,containers_image_openpgp # Cluster to use for e2e testing CLUSTER ?= "" ifeq ($(CLUSTER), kind) # add kind to the list of tags -TAGS += kind -# convert tag format from space to comma list -TAGS := $(subst $(space),$(comma),$(strip $(TAGS))) +TAGS := $(TAGS),kind endif # -race is only supported on linux/amd64, linux/ppc64le, linux/arm64, freebsd/amd64, netbsd/amd64, darwin/amd64 and windows/amd64 @@ -49,12 +47,12 @@ endif all: clean test build $(CMDS): - $(extra_env) $(GO) build $(extra_flags) $(TAGS) -o $@ ./cmd/$(notdir $@) + $(extra_env) $(GO) build $(extra_flags) -tags=$(TAGS) -o $@ ./cmd/$(notdir $@) .PHONY: $(OPM) $(OPM): opm_version_flags=-ldflags "-X '$(PKG)/cmd/opm/version.gitCommit=$(GIT_COMMIT)' -X '$(PKG)/cmd/opm/version.opmVersion=$(OPM_VERSION)' -X '$(PKG)/cmd/opm/version.buildDate=$(BUILD_DATE)'" $(OPM): - $(extra_env) $(GO) build $(opm_version_flags) $(extra_flags) $(TAGS) -o $@ ./cmd/$(notdir $@) + $(extra_env) $(GO) build $(opm_version_flags) $(extra_flags) -tags=$(TAGS) -o $@ ./cmd/$(notdir $@) .PHONY: build build: clean $(CMDS) $(OPM) @@ -63,8 +61,8 @@ build: clean $(CMDS) $(OPM) cross: opm_version_flags=-ldflags "-X '$(PKG)/cmd/opm/version.gitCommit=$(GIT_COMMIT)' -X '$(PKG)/cmd/opm/version.opmVersion=$(OPM_VERSION)' -X '$(PKG)/cmd/opm/version.buildDate=$(BUILD_DATE)'" cross: ifeq ($(shell go env GOARCH),amd64) - GOOS=darwin CC=o64-clang CXX=o64-clang++ CGO_ENABLED=1 $(GO) build $(opm_version_flags) $(TAGS) -o "bin/darwin-amd64-opm" --ldflags "-extld=o64-clang" ./cmd/opm - GOOS=windows CC=x86_64-w64-mingw32-gcc CXX=x86_64-w64-mingw32-g++ CGO_ENABLED=1 $(GO) build $(opm_version_flags) $(TAGS) -o "bin/windows-amd64-opm" --ldflags "-extld=x86_64-w64-mingw32-gcc" -buildmode=exe ./cmd/opm + GOOS=darwin CC=o64-clang CXX=o64-clang++ CGO_ENABLED=1 $(GO) build $(opm_version_flags) -tags=$(TAGS) -o "bin/darwin-amd64-opm" --ldflags "-extld=o64-clang" ./cmd/opm + GOOS=windows CC=x86_64-w64-mingw32-gcc CXX=x86_64-w64-mingw32-g++ CGO_ENABLED=1 $(GO) build $(opm_version_flags) -tags=$(TAGS) -o "bin/windows-amd64-opm" --ldflags "-extld=x86_64-w64-mingw32-gcc" -buildmode=exe ./cmd/opm endif .PHONY: static @@ -73,7 +71,7 @@ static: build .PHONY: unit unit: - $(GO) test -coverprofile=coverage.out $(SPECIFIC_UNIT_TEST) $(SPECIFIC_SKIP_UNIT_TEST) $(TAGS) $(TEST_RACE) -count=1 ./pkg/... ./alpha/... + $(GO) test -coverprofile=coverage.out --coverpkg=./... $(SPECIFIC_UNIT_TEST) $(SPECIFIC_SKIP_UNIT_TEST) -tags=$(TAGS) $(TEST_RACE) -count=1 ./pkg/... ./alpha/... .PHONY: tidy tidy: @@ -102,10 +100,8 @@ image-upstream: docker build -f upstream-example.Dockerfile . 
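# NOTE: illustrative sketch of how the comma-joined TAGS value above is
# consumed by the targets below (commands approximate, not part of the patch):
#   go build -tags=json1,containers_image_openpgp -o bin/opm ./cmd/opm
#   go test -tags=json1,containers_image_openpgp -count=1 ./pkg/... ./alpha/...
# Keeping TAGS as a comma-separated list means appending "kind" for e2e runs is
# plain concatenation, with no $(subst $(space),$(comma),...) conversion step.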
.PHONY: lint -#lint: -# find . -type f -name '*.go' ! -name '*.pb.go' -print0 | xargs -0 goimports -w lint: $(GOLANGCI_LINT) - $(GOLANGCI_LINT) run $(GOLANGCI_LINT_ARGS) + $(GOLANGCI_LINT) run --build-tags=$(TAGS) $(GOLANGCI_LINT_ARGS) .PHONY: fix-lint fix-lint: $(GOLANGCI_LINT) @@ -133,7 +129,7 @@ clean: .PHONY: e2e e2e: $(GINKGO) - $(GINKGO) --v --randomize-all --progress --trace --randomize-suites --race $(if $(TEST),-focus '$(TEST)') $(TAGS) ./test/e2e -- $(if $(SKIPTLS),-skip-tls-verify true) $(if $(USEHTTP),-use-http true) + $(GINKGO) --v --randomize-all --progress --trace --randomize-suites --race $(if $(TEST),-focus '$(TEST)') -tags=$(TAGS) ./test/e2e -- $(if $(SKIPTLS),-skip-tls-verify true) $(if $(USEHTTP),-use-http true) .PHONY: release export OPM_IMAGE_REPO ?= quay.io/operator-framework/opm diff --git a/staging/operator-registry/alpha/action/render.go b/staging/operator-registry/alpha/action/render.go index ad7a066b45..a124c0f8a6 100644 --- a/staging/operator-registry/alpha/action/render.go +++ b/staging/operator-registry/alpha/action/render.go @@ -22,9 +22,8 @@ import ( "github.com/operator-framework/operator-registry/alpha/property" "github.com/operator-framework/operator-registry/pkg/containertools" "github.com/operator-framework/operator-registry/pkg/image" - "github.com/operator-framework/operator-registry/pkg/image/containerdregistry" + "github.com/operator-framework/operator-registry/pkg/image/containersimageregistry" "github.com/operator-framework/operator-registry/pkg/lib/bundle" - "github.com/operator-framework/operator-registry/pkg/lib/log" "github.com/operator-framework/operator-registry/pkg/registry" "github.com/operator-framework/operator-registry/pkg/sqlite" ) @@ -66,7 +65,7 @@ func (r Render) Run(ctx context.Context) (*declcfg.DeclarativeConfig, error) { logDeprecationMessage.Do(func() {}) } if r.Registry == nil { - reg, err := r.createRegistry() + reg, err := containersimageregistry.NewDefault() if err != nil { return nil, fmt.Errorf("create registry: %v", err) } @@ -101,26 +100,6 @@ func (r Render) Run(ctx context.Context) (*declcfg.DeclarativeConfig, error) { return combineConfigs(cfgs), nil } -func (r Render) createRegistry() (*containerdregistry.Registry, error) { - cacheDir, err := os.MkdirTemp("", "render-registry-") - if err != nil { - return nil, fmt.Errorf("create tempdir: %v", err) - } - - reg, err := containerdregistry.NewRegistry( - containerdregistry.WithCacheDir(cacheDir), - - // The containerd registry impl is somewhat verbose, even on the happy path, - // so discard all logger logs. Any important failures will be returned from - // registry methods and eventually logged as fatal errors. 
- containerdregistry.WithLog(log.Null()), - ) - if err != nil { - return nil, err - } - return reg, nil -} - func (r Render) renderReference(ctx context.Context, ref string) (*declcfg.DeclarativeConfig, error) { stat, err := os.Stat(ref) if err != nil { diff --git a/staging/operator-registry/alpha/declcfg/load.go b/staging/operator-registry/alpha/declcfg/load.go index 7cf43ccfe0..5db111b872 100644 --- a/staging/operator-registry/alpha/declcfg/load.go +++ b/staging/operator-registry/alpha/declcfg/load.go @@ -183,15 +183,20 @@ func parseMetaPaths(ctx context.Context, root fs.FS, pathChan <-chan string, wal if !ok { return nil } - file, err := root.Open(path) + err := func() error { // using closure to ensure file is closed immediately after use + file, err := root.Open(path) + if err != nil { + return err + } + defer file.Close() + + return WalkMetasReader(file, func(meta *Meta, err error) error { + return walkFn(path, meta, err) + }) + }() if err != nil { return err } - if err := WalkMetasReader(file, func(meta *Meta, err error) error { - return walkFn(path, meta, err) - }); err != nil { - return err - } } } } diff --git a/staging/operator-registry/cmd/opm/alpha/bundle/validate.go b/staging/operator-registry/cmd/opm/alpha/bundle/validate.go index c1044c05c5..069cfff9eb 100644 --- a/staging/operator-registry/cmd/opm/alpha/bundle/validate.go +++ b/staging/operator-registry/cmd/opm/alpha/bundle/validate.go @@ -10,7 +10,7 @@ import ( "github.com/operator-framework/operator-registry/pkg/containertools" "github.com/operator-framework/operator-registry/pkg/image" - "github.com/operator-framework/operator-registry/pkg/image/containerdregistry" + "github.com/operator-framework/operator-registry/pkg/image/containersimageregistry" "github.com/operator-framework/operator-registry/pkg/image/execregistry" "github.com/operator-framework/operator-registry/pkg/lib/bundle" ) @@ -69,7 +69,7 @@ func validateFunc(cmd *cobra.Command, _ []string) error { case containertools.PodmanTool, containertools.DockerTool: registry, err = execregistry.NewRegistry(tool, logger) case containertools.NoneTool: - registry, err = containerdregistry.NewRegistry(containerdregistry.WithLog(logger)) + registry, err = containersimageregistry.NewDefault() default: err = fmt.Errorf("unrecognized container-tool option: %s", containerTool) } diff --git a/staging/operator-registry/cmd/opm/internal/util/util.go b/staging/operator-registry/cmd/opm/internal/util/util.go index e007caa483..35455b85b6 100644 --- a/staging/operator-registry/cmd/opm/internal/util/util.go +++ b/staging/operator-registry/cmd/opm/internal/util/util.go @@ -7,8 +7,8 @@ import ( "github.com/spf13/cobra" - "github.com/operator-framework/operator-registry/pkg/image/containerdregistry" - "github.com/operator-framework/operator-registry/pkg/lib/log" + "github.com/operator-framework/operator-registry/pkg/image" + "github.com/operator-framework/operator-registry/pkg/image/containersimageregistry" ) // GetTLSOptions validates and returns TLS options set by opm flags @@ -45,27 +45,15 @@ func GetTLSOptions(cmd *cobra.Command) (bool, bool, error) { // This works in tandem with opm/index/cmd, which adds the relevant flags as persistent // as part of the root command (cmd/root/cmd) initialization -func CreateCLIRegistry(cmd *cobra.Command) (*containerdregistry.Registry, error) { +func CreateCLIRegistry(cmd *cobra.Command) (image.Registry, error) { skipTLSVerify, useHTTP, err := GetTLSOptions(cmd) if err != nil { return nil, err } - - cacheDir, err := os.MkdirTemp("", 
"opm-registry-") - if err != nil { - return nil, err - } - - reg, err := containerdregistry.NewRegistry( - containerdregistry.WithCacheDir(cacheDir), - containerdregistry.SkipTLSVerify(skipTLSVerify), - containerdregistry.WithPlainHTTP(useHTTP), - containerdregistry.WithLog(log.Null()), + return containersimageregistry.New( + containersimageregistry.DefaultSystemContext, + containersimageregistry.WithInsecureSkipTLSVerify(skipTLSVerify || useHTTP), ) - if err != nil { - return nil, err - } - return reg, nil } func OpenFileOrStdin(cmd *cobra.Command, args []string) (io.ReadCloser, string, error) { diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 46b6c0e06f..a49c7d1773 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -1,36 +1,35 @@ module github.com/operator-framework/operator-registry -go 1.23.0 +go 1.23.3 -toolchain go1.23.4 +toolchain go1.23.6 require ( github.com/akrylysov/pogreb v0.10.2 github.com/blang/semver/v4 v4.0.0 - github.com/containerd/containerd v1.7.25 + github.com/containerd/containerd v1.7.27 github.com/containerd/errdefs v1.0.0 - github.com/containers/common v0.62.0 - github.com/containers/image/v5 v5.34.0 + github.com/containers/common v0.63.0 + github.com/containers/image/v5 v5.35.0 github.com/distribution/distribution/v3 v3.0.0-rc.3 github.com/distribution/reference v0.6.0 - github.com/docker/cli v28.0.0+incompatible + github.com/docker/cli v28.1.1+incompatible github.com/golang-migrate/migrate/v4 v4.18.2 github.com/golang/mock v1.6.0 github.com/google/go-cmp v0.7.0 - github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 + github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.1 github.com/grpc-ecosystem/grpc-health-probe v0.4.37 github.com/h2non/filetype v1.1.3 github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c github.com/joelanford/ignore v0.1.1 - github.com/mattn/go-sqlite3 v1.14.24 + github.com/mattn/go-sqlite3 v1.14.28 github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 - github.com/onsi/ginkgo/v2 v2.22.2 - github.com/onsi/gomega v1.36.2 + github.com/onsi/ginkgo/v2 v2.23.4 + github.com/onsi/gomega v1.37.0 github.com/opencontainers/go-digest v1.0.0 - github.com/opencontainers/image-spec v1.1.0 - github.com/operator-framework/api v0.29.0 + github.com/opencontainers/image-spec v1.1.1 + github.com/operator-framework/api v0.31.0 github.com/otiai10/copy v1.14.1 - github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 github.com/pkg/errors v0.9.1 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.9.1 @@ -39,57 +38,63 @@ require ( github.com/tidwall/btree v1.7.0 go.etcd.io/bbolt v1.4.0 golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 - golang.org/x/mod v0.23.0 - golang.org/x/net v0.35.0 - golang.org/x/sync v0.11.0 - golang.org/x/sys v0.30.0 - golang.org/x/text v0.22.0 - google.golang.org/grpc v1.70.0 + golang.org/x/mod v0.24.0 + golang.org/x/net v0.39.0 + golang.org/x/sync v0.13.0 + golang.org/x/sys v0.32.0 + golang.org/x/text v0.24.0 + google.golang.org/grpc v1.72.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 - google.golang.org/protobuf v1.36.5 + google.golang.org/protobuf v1.36.6 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.32.2 - k8s.io/apiextensions-apiserver v0.32.2 - k8s.io/apimachinery v0.32.2 - k8s.io/client-go v0.32.2 + k8s.io/api v0.32.3 + k8s.io/apiextensions-apiserver v0.32.3 + k8s.io/apimachinery v0.32.3 + k8s.io/client-go v0.32.3 k8s.io/kubectl v0.32.0 - sigs.k8s.io/controller-runtime v0.20.2 - sigs.k8s.io/kind v0.26.0 + oras.land/oras-go/v2 v2.5.0 + 
sigs.k8s.io/controller-runtime v0.20.4 + sigs.k8s.io/kind v0.27.0 sigs.k8s.io/yaml v1.4.0 ) +// https://github.com/kubernetes/apiserver/issues/116 +// reevaluate when we bump to k8s v1.33.0 +replace github.com/google/cel-go => github.com/google/cel-go v0.22.1 + require ( - cel.dev/expr v0.19.0 // indirect + al.essio.dev/pkg/shellescape v1.5.1 // indirect + cel.dev/expr v0.20.0 // indirect github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect - github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect - github.com/BurntSushi/toml v1.4.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect + github.com/BurntSushi/toml v1.5.0 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/Microsoft/hcsshim v0.12.9 // indirect - github.com/alessio/shellescape v1.4.2 // indirect + github.com/VividCortex/ewma v1.2.0 // indirect + github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bshuster-repo/logrus-logstash-hook v1.0.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/containerd/cgroups/v3 v3.0.3 // indirect + github.com/containerd/cgroups/v3 v3.0.5 // indirect github.com/containerd/containerd/api v1.8.0 // indirect github.com/containerd/continuity v0.4.4 // indirect github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/containerd/log v0.1.0 // indirect github.com/containerd/platforms v0.2.1 // indirect - github.com/containerd/ttrpc v1.2.5 // indirect + github.com/containerd/ttrpc v1.2.7 // indirect github.com/containerd/typeurl/v2 v2.2.3 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect github.com/containers/ocicrypt v1.2.1 // indirect - github.com/containers/storage v1.57.1 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/containers/storage v1.58.0 // indirect + github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v27.5.1+incompatible // indirect - github.com/docker/docker-credential-helpers v0.8.2 // indirect + github.com/docker/docker v28.0.4+incompatible // indirect + github.com/docker/docker-credential-helpers v0.9.3 // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect github.com/docker/go-metrics v0.0.1 // indirect @@ -101,103 +106,133 @@ require ( github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.6.1 // indirect github.com/go-git/go-git/v5 v5.13.1 // indirect - github.com/go-jose/go-jose/v4 v4.0.4 // indirect + github.com/go-jose/go-jose/v4 v4.0.5 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/analysis v0.23.0 // indirect + github.com/go-openapi/errors v0.22.1 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect - 
github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-openapi/loads v0.22.0 // indirect + github.com/go-openapi/runtime v0.28.0 // indirect + github.com/go-openapi/spec v0.21.0 // indirect + github.com/go-openapi/strfmt v0.23.0 // indirect + github.com/go-openapi/swag v0.23.1 // indirect + github.com/go-openapi/validate v0.24.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/cel-go v0.22.1 // indirect + github.com/google/cel-go v0.23.0 // indirect github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-containerregistry v0.20.3 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect + github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/handlers v1.5.2 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect github.com/hashicorp/golang-lru/arc/v2 v2.0.5 // indirect github.com/hashicorp/golang-lru/v2 v2.0.5 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.11 // indirect + github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/pgzip v1.2.6 // indirect + github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect - github.com/mailru/easyjson v0.7.7 // indirect + github.com/mailru/easyjson v0.9.0 // indirect github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/miekg/pkcs11 v1.1.1 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/locker v1.0.1 // indirect github.com/moby/spdystream v0.5.0 // indirect github.com/moby/sys/capability v0.4.0 // indirect github.com/moby/sys/mountinfo v0.7.2 // indirect github.com/moby/sys/sequential v0.5.0 // indirect - github.com/moby/sys/user v0.3.0 // indirect + github.com/moby/sys/user v0.4.0 // indirect github.com/moby/sys/userns v0.1.0 // indirect - github.com/moby/term v0.5.0 // indirect + github.com/moby/term v0.5.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect - github.com/opencontainers/runtime-spec v1.2.0 // indirect + github.com/oklog/ulid v1.3.1 // indirect + github.com/opencontainers/runtime-spec v1.2.1 // indirect github.com/otiai10/mint v1.6.3 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.20.5 // indirect + 
github.com/proglottis/gpgme v0.1.4 // indirect + github.com/prometheus/client_golang v1.21.1 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 // indirect github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 // indirect - github.com/redis/go-redis/v9 v9.1.0 // indirect + github.com/redis/go-redis/v9 v9.7.3 // indirect + github.com/rivo/uniseg v0.4.7 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/spiffe/go-spiffe/v2 v2.4.0 // indirect + github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect + github.com/sigstore/fulcio v1.6.6 // indirect + github.com/sigstore/protobuf-specs v0.4.1 // indirect + github.com/sigstore/rekor v1.3.10 // indirect + github.com/sigstore/sigstore v1.9.3 // indirect + github.com/smallstep/pkcs7 v0.1.1 // indirect + github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect + github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect + github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect + github.com/ulikunitz/xz v0.5.12 // indirect + github.com/vbatts/tar-split v0.12.1 // indirect + github.com/vbauerster/mpb/v8 v8.9.3 // indirect github.com/x448/float16 v0.8.4 // indirect - github.com/zeebo/errs v1.3.0 // indirect + github.com/zeebo/errs v1.4.0 // indirect + go.mongodb.org/mongo-driver v1.14.0 // indirect go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 // indirect go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 // indirect - go.opentelemetry.io/otel v1.32.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect + go.opentelemetry.io/otel v1.34.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 // indirect go.opentelemetry.io/otel/exporters/prometheus v0.54.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 // indirect go.opentelemetry.io/otel/log v0.8.0 // indirect - go.opentelemetry.io/otel/metric v1.32.0 // indirect - go.opentelemetry.io/otel/sdk v1.32.0 // indirect + go.opentelemetry.io/otel/metric v1.34.0 // indirect + go.opentelemetry.io/otel/sdk v1.34.0 // indirect go.opentelemetry.io/otel/sdk/log v0.8.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect - go.opentelemetry.io/otel/trace v1.32.0 // indirect - go.opentelemetry.io/proto/otlp 
v1.3.1 // indirect - golang.org/x/crypto v0.33.0 // indirect - golang.org/x/oauth2 v0.25.0 // indirect - golang.org/x/term v0.29.0 // indirect - golang.org/x/time v0.7.0 // indirect - golang.org/x/tools v0.29.0 // indirect - google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241202173237-19429a94021a // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d // indirect + go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect + go.opentelemetry.io/otel/trace v1.34.0 // indirect + go.opentelemetry.io/proto/otlp v1.4.0 // indirect + go.uber.org/automaxprocs v1.6.0 // indirect + golang.org/x/crypto v0.37.0 // indirect + golang.org/x/oauth2 v0.29.0 // indirect + golang.org/x/term v0.31.0 // indirect + golang.org/x/time v0.11.0 // indirect + golang.org/x/tools v0.31.0 // indirect + google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiserver v0.32.2 // indirect + k8s.io/apiserver v0.32.3 // indirect k8s.io/cli-runtime v0.32.0 // indirect - k8s.io/component-base v0.32.2 // indirect + k8s.io/component-base v0.32.3 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index d6db88ae7a..b56b4990cb 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -1,25 +1,31 @@ -cel.dev/expr v0.19.0 h1:lXuo+nDhpyJSpWxpPVi5cPUwzKb+dsdOiw6IreM5yt0= -cel.dev/expr v0.19.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +al.essio.dev/pkg/shellescape v1.5.1 h1:86HrALUujYS/h+GtqoB26SBEdkWfmMI6FubjXlsXyho= +al.essio.dev/pkg/shellescape v1.5.1/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= +cel.dev/expr v0.20.0 h1:OunBvVCfvpWlt4dN7zg3FM6TDkzOePe1+foGJ9AXeeI= +cel.dev/expr v0.20.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= -github.com/BurntSushi/toml v1.4.0/go.mod 
h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= +github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6lLg= github.com/Microsoft/hcsshim v0.12.9/go.mod h1:fJ0gkFAna6ukt0bLdKB8djt4XIJhF/vEPuoIWYVvZ8Y= +github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= +github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= +github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= +github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/akrylysov/pogreb v0.10.2 h1:e6PxmeyEhWyi2AKOBIJzAEi4HkiC+lKyCocRGlnDi78= github.com/akrylysov/pogreb v0.10.2/go.mod h1:pNs6QmpQ1UlTJKDezuRWmaqkgUE2TuU0YTWyqJZ7+lI= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alessio/shellescape v1.4.2 h1:MHPfaU+ddJ0/bYWpgIeUnQUqKrlJ1S7BfEYPM4uEoM0= -github.com/alessio/shellescape v1.4.2/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= @@ -32,13 +38,12 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= -github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/bsm/ginkgo/v2 v2.7.0/go.mod h1:AiKlXPm7ItEHNc/2+OkrNG4E0ITzojb9/xWzvQ9XZ9w= -github.com/bsm/ginkgo/v2 v2.9.5 h1:rtVBYPs3+TC5iLUVOis1B9tjLTup7Cj5IfzosKtvTJ0= -github.com/bsm/ginkgo/v2 v2.9.5/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= -github.com/bsm/gomega v1.26.0 h1:LhQm+AFcgV2M0WyKroMASzAzCAJVpAxQXv4SaI9a69Y= +github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.26.0/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= +github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= 
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -47,10 +52,10 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= -github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= -github.com/containerd/containerd v1.7.25 h1:khEQOAXOEJalRO228yzVsuASLH42vT7DIo9Ss+9SMFQ= -github.com/containerd/containerd v1.7.25/go.mod h1:tWfHzVI0azhw4CT2vaIjsb2CoV4LJ9PrMPaULAr21Ok= +github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo= +github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= +github.com/containerd/containerd v1.7.27 h1:yFyEyojddO3MIGVER2xJLWoCIn+Up4GaHFquP7hsFII= +github.com/containerd/containerd v1.7.27/go.mod h1:xZmPnl75Vc+BLGt4MIfu6bp+fy03gdHAn9bz+FreFR0= github.com/containerd/containerd/api v1.8.0 h1:hVTNJKR8fMc/2Tiw60ZRijntNMd1U+JVMyTRdsD2bS0= github.com/containerd/containerd/api v1.8.0/go.mod h1:dFv4lt6S20wTu/hMcP4350RL87qPWLVa/OHOwmmdnYc= github.com/containerd/continuity v0.4.4 h1:/fNVfTJ7wIl/YPMHjf+5H32uFhl63JucB34PlCpMKII= @@ -63,20 +68,20 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= -github.com/containerd/ttrpc v1.2.5 h1:IFckT1EFQoFBMG4c3sMdT8EP3/aKfumK1msY+Ze4oLU= -github.com/containerd/ttrpc v1.2.5/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= +github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRqQ= +github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= -github.com/containers/common v0.62.0 h1:Sl9WE5h7Y/F3bejrMAA4teP1EcY9ygqJmW4iwSloZ10= -github.com/containers/common v0.62.0/go.mod h1:Yec+z8mrSq4rydHofrnDCBqAcNA/BGrSg1kfFUL6F6s= -github.com/containers/image/v5 v5.34.0 h1:HPqQaDUsox/3mC1pbOyLAIQEp0JhQqiUZ+6JiFIZLDI= -github.com/containers/image/v5 v5.34.0/go.mod h1:/WnvUSEfdqC/ahMRd4YJDBLrpYWkGl018rB77iB3FDo= +github.com/containers/common v0.63.0 h1:ox6vgUYX5TSvt4W+bE36sYBVz/aXMAfRGVAgvknSjBg= +github.com/containers/common v0.63.0/go.mod h1:+3GCotSqNdIqM3sPs152VvW7m5+Mg8Kk+PExT3G9hZw= +github.com/containers/image/v5 v5.35.0 h1:T1OeyWp3GjObt47bchwD9cqiaAm/u4O4R9hIWdrdrP8= +github.com/containers/image/v5 v5.35.0/go.mod h1:8vTsgb+1gKcBL7cnjyNOInhJQfTUQjJoO2WWkKDoebM= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM= github.com/containers/ocicrypt v1.2.1/go.mod 
h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ= -github.com/containers/storage v1.57.1 h1:hKPoFsuBcB3qTzBxa4IFpZMRzUuL5Xhv/BE44W0XHx8= -github.com/containers/storage v1.57.1/go.mod h1:i/Hb4lu7YgFr9G0K6BMjqW0BLJO1sFsnWQwj2UoWCUM= +github.com/containers/storage v1.58.0 h1:Q7SyyCCjqgT3wYNgRNIL8o/wUS92heIj2/cc8Sewvcc= +github.com/containers/storage v1.58.0/go.mod h1:w7Jl6oG+OpeLGLzlLyOZPkmUso40kjpzgrHUk5tyBlo= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= @@ -84,6 +89,8 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -94,14 +101,14 @@ github.com/distribution/distribution/v3 v3.0.0-rc.3 h1:JRJso9IVLoooKX76oWR+DWCCd github.com/distribution/distribution/v3 v3.0.0-rc.3/go.mod h1:offoOgrnYs+CFwis8nE0hyzYZqRCZj5EFc5kgfszwiE= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v28.0.0+incompatible h1:ido37VmLUqEp+5NFb9icd6BuBB+SNDgCn+5kPCr2buA= -github.com/docker/cli v28.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v28.1.1+incompatible h1:eyUemzeI45DY7eDPuwUcmDyDj1pM98oD5MdSpiItp8k= +github.com/docker/cli v28.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8= -github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= -github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= +github.com/docker/docker v28.0.4+incompatible h1:JNNkBctYKurkw6FrHfKqY0nKIDf5nrbxjVBtS+cdcok= +github.com/docker/docker v28.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8= +github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= 
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= @@ -128,8 +135,8 @@ github.com/go-git/go-billy/v5 v5.6.1 h1:u+dcrgaguSSkbjzHwelEjc0Yj300NUevrrPphk/S github.com/go-git/go-billy/v5 v5.6.1/go.mod h1:0AsLr1z2+Uksi4NlElmMblP5rPcDZNRCD8ujZCRR2BE= github.com/go-git/go-git/v5 v5.13.1 h1:DAQ9APonnlvSWpvolXWIuV6Q6zXy2wHbN4cVlNR5Q+M= github.com/go-git/go-git/v5 v5.13.1/go.mod h1:qryJB4cSBoq3FRoBRf5A77joojuBcmPJ0qu3XXXVixc= -github.com/go-jose/go-jose/v4 v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E= -github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc= +github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= +github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= @@ -140,16 +147,31 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= +github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= +github.com/go-openapi/errors v0.22.1 h1:kslMRRnK7NCb/CvR1q1VWuEQCEIsBGn5GgKD9e+HYhU= +github.com/go-openapi/errors v0.22.1/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= -github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= -github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= +github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= +github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ= +github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= +github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= +github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= +github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= +github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= +github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= +github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= 
 github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
 github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U=
+github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
@@ -157,8 +179,8 @@ github.com/golang-migrate/migrate/v4 v4.18.2 h1:2VSCMz7x7mjyTXx3m2zPokOY82LTRgxK
 github.com/golang-migrate/migrate/v4 v4.18.2/go.mod h1:2CM6tJvn2kqPXwnXO/d3rAQYiyoIm180VsO8PRX6Rpk=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
 github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
@@ -187,15 +209,20 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
 github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
+github.com/google/go-containerregistry v0.20.3 h1:oNx7IdTI936V8CQRveCjaxOiegWwvM7kqkbXTpyiovI=
+github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
-github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
+github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
+github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
 github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI=
 github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg=
+github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
+github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -205,12 +232,12 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
 github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
 github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
 github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 h1:kQ0NI7W1B3HwiN5gAYtY+XFItDPbLBwYRxAqbFTyDes=
-github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0/go.mod h1:zrT2dxOAjNFPRGjTUe2Xmb4q4YdUwVvQFV6xiCSf+z0=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.1 h1:KcFzXwzM/kGhIRHvc8jdixfIJjVzuUJdnv+5xsPutog=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.1/go.mod h1:qOchhhIlmRcqk/O9uCo/puJlyo07YINaIqdZfZG3Jkc=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99 h1:JYghRBlGCZyCF2wNUJ8W0cwaQdtpcssJ4CgC406g+WU=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99/go.mod h1:3bDW6wMZJB7tiONtC/1Xpicra6Wp5GgbTbQWCbI5fkc=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ=
 github.com/grpc-ecosystem/grpc-health-probe v0.4.37 h1:x8IIa8JfqiXpBX3xtmm3jz5b9YMq4Lpc9/b+ZAfzRZI=
 github.com/grpc-ecosystem/grpc-health-probe v0.4.37/go.mod h1:uKUtQKoBUvaI4Ez1jaLCHxFYYYoYn/FYkqNgl+8hADw=
 github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg=
@@ -229,6 +256,8 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
+github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs=
+github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI=
 github.com/joelanford/ignore v0.1.1 h1:vKky5RDoPT+WbONrbQBgOn95VV/UPh4ejlyAbbzgnQk=
 github.com/joelanford/ignore v0.1.1/go.mod h1:8eho/D8fwQ3rIXrLwE23AaeaGDNXqLE9QJ3zJ4LIPCw=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
@@ -240,8 +269,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
-github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
 github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
 github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -252,21 +281,29 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec h1:2tTW6cDth2TSgRbAhD7yjZzTQmcN25sDRPEeinR51yQ=
+github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec/go.mod h1:TmwEoGCwIti7BCeJ9hescZgRtatxRE+A72pCoPfmcfk=
 github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
 github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
-github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
-github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
+github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
 github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
 github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
-github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM=
-github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
+github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
+github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-sqlite3 v1.14.28 h1:ThEiQrnbtumT+QMknw63Befp/ce/nUPgBPMlRFEum7A=
+github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 h1:yVCLo4+ACVroOEr4iFU1iH46Ldlzz2rTuu18Ra7M8sU=
 github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2/go.mod h1:VzB2VoMh1Y32/QqDfg9ZJYHj99oM4LiGtqPZydTiQSQ=
+github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
+github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
 github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
 github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
 github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
 github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
@@ -277,12 +314,12 @@ github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9Kou
 github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
 github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
 github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
-github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo=
-github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
+github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
+github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
 github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
 github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
-github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
-github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
+github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
+github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -295,37 +332,41 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU=
-github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk=
-github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
-github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
+github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus=
+github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8=
+github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y=
+github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
-github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
-github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
-github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/operator-framework/api v0.29.0 h1:TxAR8RCO+I4FjRrY4PSMgnlmbxNWeD8pzHXp7xwHNmw=
-github.com/operator-framework/api v0.29.0/go.mod h1:0whQE4mpMDd2zyHkQe+bFa3DLoRs6oGWCbu8dY/3pyc=
+github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
+github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
+github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww=
+github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/operator-framework/api v0.31.0 h1:tRsFTuZ51xD8U5QgiPo3+mZgVipHZVgRXYrI6RRXOh8=
+github.com/operator-framework/api v0.31.0/go.mod h1:57oCiHNeWcxmzu1Se8qlnwEKr/GGXnuHvspIYFCcXmY=
 github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8=
 github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I=
 github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs=
 github.com/otiai10/mint v1.6.3/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM=
 github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
 github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
-github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc=
-github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
+github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
+github.com/proglottis/gpgme v0.1.4 h1:3nE7YNA70o2aLjcg63tXMOhPD7bplfE5CBdV+hLAm2M=
+github.com/proglottis/gpgme v0.1.4/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glEEZ7mRKrM=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
-github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
-github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk=
+github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -333,8 +374,8 @@ github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p
 github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
-github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
+github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
+github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
@@ -345,23 +386,42 @@ github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJu
 github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc=
 github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ=
 github.com/redis/go-redis/v9 v9.0.5/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk=
-github.com/redis/go-redis/v9 v9.1.0 h1:137FnGdk+EQdCbye1FW+qOEcY5S+SpY9T0NiuqvtfMY=
-github.com/redis/go-redis/v9 v9.1.0/go.mod h1:urWj3He21Dj5k4TK1y59xH8Uj6ATueP8AH1cY3lZl4c=
+github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM=
+github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
+github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
 github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
 github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 h1:PKK9DyHxif4LZo+uQSgXNqs0jj5+xZwwfKHgph2lxBw=
+github.com/santhosh-tekuri/jsonschema/v6 v6.0.1/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU=
 github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8=
 github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM=
+github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc=
+github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw=
+github.com/sigstore/fulcio v1.6.6 h1:XaMYX6TNT+8n7Npe8D94nyZ7/ERjEsNGFC+REdi/wzw=
+github.com/sigstore/fulcio v1.6.6/go.mod h1:BhQ22lwaebDgIxVBEYOOqLRcN5+xOV+C9bh/GUXRhOk=
+github.com/sigstore/protobuf-specs v0.4.1 h1:5SsMqZbdkcO/DNHudaxuCUEjj6x29tS2Xby1BxGU7Zc=
+github.com/sigstore/protobuf-specs v0.4.1/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc=
+github.com/sigstore/rekor v1.3.10 h1:/mSvRo4MZ/59ECIlARhyykAlQlkmeAQpvBPlmJtZOCU=
+github.com/sigstore/rekor v1.3.10/go.mod h1:JvryKJ40O0XA48MdzYUPu0y4fyvqt0C4iSY7ri9iu3A=
+github.com/sigstore/sigstore v1.9.3 h1:y2qlTj+vh+Or3ictKuR3JUFawZPdDxAjrWkeFhon0OQ=
+github.com/sigstore/sigstore v1.9.3/go.mod h1:VwYkiw0G0dRtwL25KSs04hCyVFF6CYMd/qvNeYrl7EQ=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/smallstep/pkcs7 v0.1.1 h1:x+rPdt2W088V9Vkjho4KtoggyktZJlMduZAtRHm68LU=
+github.com/smallstep/pkcs7 v0.1.1/go.mod h1:dL6j5AIz9GHjVEBTXtW+QliALcgM19RtXaTeyxI+AfA=
 github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
 github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
 github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
 github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spiffe/go-spiffe/v2 v2.4.0 h1:j/FynG7hi2azrBG5cvjRcnQ4sux/VNj8FAVc99Fl66c=
-github.com/spiffe/go-spiffe/v2 v2.4.0/go.mod h1:m5qJ1hGzjxjtrkGHZupoXHo/FDWwCB1MdSyBzfHugx0=
+github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE=
+github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g=
+github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw=
+github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M=
 github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
 github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -378,17 +438,22 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf
 github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI=
 github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY=
+github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
+github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
 github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
 github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
-github.com/vbatts/tar-split v0.11.7 h1:ixZ93pO/GmvaZw4Vq9OwmfZK/kc2zKdPfu0B+gYqs3U=
-github.com/vbatts/tar-split v0.11.7/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
+github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo=
+github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
+github.com/vbauerster/mpb/v8 v8.9.3 h1:PnMeF+sMvYv9u23l6DO6Q3+Mdj408mjLRXIzmUmU2Z8=
+github.com/vbauerster/mpb/v8 v8.9.3/go.mod h1:hxS8Hz4C6ijnppDSIX6LjG8FYJSoPo9iIOcE53Zik0c=
 github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
 github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/zeebo/errs v1.3.0 h1:hmiaKqgYZzcVgRL1Vkc1Mn2914BbzB0IBxs+ebeutGs=
-github.com/zeebo/errs v1.3.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM=
+github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
 go.etcd.io/bbolt v1.4.0 h1:TU77id3TnN/zKr7CO/uk+fBCwF2jGcMuw2B/FMAzYIk=
 go.etcd.io/bbolt v1.4.0/go.mod h1:AsD+OCi/qPN1giOX1aiLAha3o1U8rAz65bvN4j0sRuk=
 go.etcd.io/etcd/api/v3 v3.5.16 h1:WvmyJVbjWqK4R1E+B12RRHz3bRGy9XVfh++MgbN+6n0=
@@ -397,18 +462,22 @@ go.etcd.io/etcd/client/pkg/v3 v3.5.16 h1:ZgY48uH6UvB+/7R9Yf4x574uCO3jIx0TRDyetSf
 go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E=
 go.etcd.io/etcd/client/v3 v3.5.16 h1:sSmVYOAHeC9doqi0gv7v86oY/BTld0SEFGaxsU9eRhE=
 go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50=
+go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
+go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
 go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w=
 go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk=
 go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4=
 go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94=
-go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U=
-go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 h1:rgMkmiGfix9vFJDcDi1PK8WEQP4FLQwLDfhp5ZLpFeE=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0/go.mod h1:ijPqXp5P6IRRByFVVg9DY8P5HkxkHE5ARIa+86aXPf4=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I=
+go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
+go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls=
 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs=
 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8=
@@ -417,12 +486,12 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7Z
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8=
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU=
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0=
 go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU=
 go.opentelemetry.io/otel/exporters/prometheus v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU=
 go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 h1:CHXNXwfKWfzS65yrlB2PVds1IBZcdsX8Vepy9of0iRU=
@@ -433,20 +502,22 @@ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsu
 go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s=
 go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk=
 go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8=
-go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M=
-go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8=
-go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4=
-go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU=
+go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
+go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
+go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A=
+go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=
 go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs=
 go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo=
-go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU=
-go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ=
-go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM=
-go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8=
-go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
-go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
+go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk=
+go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
+go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
+go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
+go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
+go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
 go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
+go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
 go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@@ -457,8 +528,13 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus=
-golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
+golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
+golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
+golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 h1:9kj3STMvgqy3YA4VQXBrN7925ICMxD5wzMRcgA30588=
 golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c=
@@ -468,8 +544,13 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
-golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
+golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -481,12 +562,19 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
-golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
+golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
+golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
-golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98=
+golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -494,8 +582,14 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
-golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
+golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -506,20 +600,44 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
-golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
+golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU=
-golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
+golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
+golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
+golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
+golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
-golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
-golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
-golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
+golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
+golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
+golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -529,8 +647,12 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE=
-golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU=
+golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -540,19 +662,19 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU=
-google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4=
-google.golang.org/genproto/googleapis/api v0.0.0-20241202173237-19429a94021a h1:OAiGFfOiA0v9MRYsSidp3ubZaBnteRUyn3xB2ZQ5G/E=
-google.golang.org/genproto/googleapis/api v0.0.0-20241202173237-19429a94021a/go.mod h1:jehYqy3+AhJU9ve55aNOaSml7wUXjF9x6z2LcCfpAhY=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d h1:xJJRGY7TJcvIlpSrN3K6LAWgNFUILlO+OMAqtg9aqnw=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d/go.mod h1:3ENsm/5D1mzDyhpzeRi1NR784I0BcofWBoSc5QqqMK4=
+google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb h1:ITgPrl429bc6+2ZraNSzMDk3I95nmQln2fuPstKwFDE=
+google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:sAo5UzpjUwgFBCzupwhcLcxHVDK7vG5IqI30YnwX2eE=
+google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950=
+google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f h1:N/PrbTw4kdkqNRzVfWPrBekzLuarFREcbFOiOLkXon4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
 google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
 google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ=
-google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw=
+google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM=
+google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 h1:F29+wU6Ee6qgu9TddPgooOdaqsxTMunOoj8KA5yuS5A=
 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1/go.mod h1:5KF+wpkbTSbGcR9zteSqZV6fqFOWBl4Yde8En8MryZA=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
@@ -564,8 +686,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
-google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
+google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
@@ -582,25 +704,24 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
-gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
-gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
+gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
+gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-k8s.io/api v0.32.2 h1:bZrMLEkgizC24G9eViHGOPbW+aRo9duEISRIJKfdJuw=
-k8s.io/api v0.32.2/go.mod h1:hKlhk4x1sJyYnHENsrdCWw31FEmCijNGPJO5WzHiJ6Y=
-k8s.io/apiextensions-apiserver v0.32.2 h1:2YMk285jWMk2188V2AERy5yDwBYrjgWYggscghPCvV4=
-k8s.io/apiextensions-apiserver v0.32.2/go.mod h1:GPwf8sph7YlJT3H6aKUWtd0E+oyShk/YHWQHf/OOgCA=
-k8s.io/apimachinery v0.32.2 h1:yoQBR9ZGkA6Rgmhbp/yuT9/g+4lxtsGYwW6dR6BDPLQ=
-k8s.io/apimachinery v0.32.2/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
-k8s.io/apiserver v0.32.2 h1:WzyxAu4mvLkQxwD9hGa4ZfExo3yZZaYzoYvvVDlM6vw=
-k8s.io/apiserver v0.32.2/go.mod h1:PEwREHiHNU2oFdte7BjzA1ZyjWjuckORLIK/wLV5goM=
+k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls=
+k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k=
+k8s.io/apiextensions-apiserver v0.32.3 h1:4D8vy+9GWerlErCwVIbcQjsWunF9SUGNu7O7hiQTyPY=
+k8s.io/apiextensions-apiserver v0.32.3/go.mod h1:8YwcvVRMVzw0r1Stc7XfGAzB/SIVLunqApySV5V7Dss=
+k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U=
+k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
+k8s.io/apiserver v0.32.3 h1:kOw2KBuHOA+wetX1MkmrxgBr648ksz653j26ESuWNY8=
+k8s.io/apiserver v0.32.3/go.mod h1:q1x9B8E/WzShF49wh3ADOh6muSfpmFL0I2t+TG0Zdgc=
 k8s.io/cli-runtime v0.32.0 h1:dP+OZqs7zHPpGQMCGAhectbHU2SNCuZtIimRKTv2T1c=
 k8s.io/cli-runtime v0.32.0/go.mod h1:Mai8ht2+esoDRK5hr861KRy6z0zHsSTYttNVJXgP3YQ=
-k8s.io/client-go v0.32.2 h1:4dYCD4Nz+9RApM2b/3BtVvBHw54QjMFUl1OLcJG5yOA=
-k8s.io/client-go v0.32.2/go.mod h1:fpZ4oJXclZ3r2nDOv+Ux3XcJutfrwjKTCHz2H3sww94=
-k8s.io/component-base v0.32.2 h1:1aUL5Vdmu7qNo4ZsE+569PV5zFatM9hl+lb3dEea2zU=
-k8s.io/component-base v0.32.2/go.mod h1:PXJ61Vx9Lg+P5mS8TLd7bCIr+eMJRQTyXe8KvkrvJq0=
+k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU=
+k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY=
+k8s.io/component-base v0.32.3 h1:98WJvvMs3QZ2LYHBzvltFSeJjEx7t5+8s71P7M74u8k=
+k8s.io/component-base v0.32.3/go.mod h1:LWi9cR+yPAv7cu2X9rZanTiFKB2kHA+JjmhkKjCZRpI=
 k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
 k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
 k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
@@ -609,14 +730,16 @@ k8s.io/kubectl v0.32.0 h1:rpxl+ng9qeG79YA4Em9tLSfX0G8W0vfaiPVrc/WR7Xw=
 k8s.io/kubectl v0.32.0/go.mod h1:qIjSX+QgPQUgdy8ps6eKsYNF+YmFOAO3WygfucIqFiE=
 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+oras.land/oras-go/v2 v2.5.0 h1:o8Me9kLY74Vp5uw07QXPiitjsw7qNXi8Twd+19Zf02c=
+oras.land/oras-go/v2 v2.5.0/go.mod h1:z4eisnLP530vwIOUOJeBIj0aGI0L1C3d53atvCBqZHg=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
-sigs.k8s.io/controller-runtime v0.20.2 h1:/439OZVxoEc02psi1h4QO3bHzTgu49bb347Xp4gW1pc=
-sigs.k8s.io/controller-runtime v0.20.2/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY=
+sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU=
+sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY=
 sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
 sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
-sigs.k8s.io/kind v0.26.0 h1:8fS6I0Q5WGlmLprSpH0DarlOSdcsv0txnwc93J2BP7M=
-sigs.k8s.io/kind v0.26.0/go.mod h1:t7ueEpzPYJvHA8aeLtI52rtFftNgUYUaCwvxjk7phfw=
+sigs.k8s.io/kind v0.27.0 h1:PQ3f0iAWNIj66LYkZ1ivhEg/+Zb6UPMbO+qVei/INZA=
+sigs.k8s.io/kind v0.27.0/go.mod h1:RZVFmy6qcwlSWwp6xeIUv7kXCPF3i8MXsEXxW/J+gJY=
 sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
 sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
 sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
diff --git a/staging/operator-registry/hack/tools/check-go-version.sh b/staging/operator-registry/hack/tools/check-go-version.sh
new file mode 100755
index 0000000000..61289a4b9d
--- /dev/null
+++ b/staging/operator-registry/hack/tools/check-go-version.sh
@@ -0,0 +1,97 @@
+#!/bin/bash
+
+###########################################
+# Check Go version in go.mod files
+# and ensure it is not greater than the
+# version in the main go.mod file.
+# Also check if the version in the main
+# go.mod file is updated in the
+# submodules.
+# This script is intended to be run
+# as part of the CI pipeline to ensure
+# that the version of Go that we can use
+# is not accidentally upgraded.
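+#
+# Usage: check-go-version.sh [BASE_REF]
+# BASE_REF is the git ref whose go.mod versions are compared
+# against when detecting version bumps; it defaults to "main"
+# (see BASE_REF=${1:-main} below).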
+# Source: https://github.com/operator-framework/operator-controller/blob/main/hack/tools/check-go-version.sh
+#
+# NOTE: We intend to centralize this
+# implementation in the future.
+###########################################
+
+BASE_REF=${1:-main}
+GO_VER=$(sed -En 's/^go (.*)$/\1/p' "go.mod")
+OLDIFS="${IFS}"
+IFS='.' MAX_VER=(${GO_VER})
+IFS="${OLDIFS}"
+
+if [ ${#MAX_VER[*]} -ne 3 -a ${#MAX_VER[*]} -ne 2 ]; then
+    echo "Invalid go version: ${GO_VER}"
+    exit 1
+fi
+
+GO_MAJOR=${MAX_VER[0]}
+GO_MINOR=${MAX_VER[1]}
+GO_PATCH=${MAX_VER[2]}
+
+RETCODE=0
+
+check_version () {
+    local whole=$1
+    local file=$2
+    OLDIFS="${IFS}"
+    IFS='.' ver=(${whole})
+    IFS="${OLDIFS}"
+
+    if [ ${ver[0]} -gt ${GO_MAJOR} ]; then
+        echo "${file}: ${whole}: Bad golang version (expected ${GO_VER} or less)"
+        return 1
+    fi
+    if [ ${ver[1]} -gt ${GO_MINOR} ]; then
+        echo "${file}: ${whole}: Bad golang version (expected ${GO_VER} or less)"
+        return 1
+    fi
+
+    if [ ${#ver[*]} -eq 2 ] ; then
+        return 0
+    fi
+    if [ ${#ver[*]} -ne 3 ] ; then
+        echo "${file}: ${whole}: Badly formatted golang version"
+        return 1
+    fi
+
+    if [ ${ver[1]} -eq ${GO_MINOR} -a ${ver[2]} -gt ${GO_PATCH} ]; then
+        echo "${file}: ${whole}: Bad golang version (expected ${GO_VER} or less)"
+        return 1
+    fi
+    return 0
+}
+
+echo "Found golang version: ${GO_VER}"
+
+for f in $(find . -name "*.mod"); do
+    v=$(sed -En 's/^go (.*)$/\1/p' ${f})
+    if [ -z "${v}" ]; then
+        echo "${f}: Skipping, no version found"
+        continue
+    fi
+    if ! check_version ${v} ${f}; then
+        RETCODE=1
+    fi
+    old=$(git grep -ohP '^go .*$' "${BASE_REF}" -- "${f}")
+    old=${old#go }
+    new=$(git grep -ohP '^go .*$' "${f}")
+    new=${new#go }
+    # If ${old} is empty, it means this is a new .mod file
+    if [ -z "${old}" ]; then
+        continue
+    fi
+    # Check if patch version remains 0: X.x.0 <-> X.x
+    if [ "${new}.0" == "${old}" -o "${new}" == "${old}.0" ]; then
+        continue
+    fi
+    if [ "${new}" != "${old}" ]; then
+        echo "${f}: ${v}: Updated golang version from ${old}"
+        RETCODE=1
+    fi
+done
+
+exit ${RETCODE}
diff --git a/staging/operator-registry/internal/testutil/image/registry.go b/staging/operator-registry/internal/testutil/image/registry.go
new file mode 100644
index 0000000000..736cb003df
--- /dev/null
+++ b/staging/operator-registry/internal/testutil/image/registry.go
@@ -0,0 +1,35 @@
+package image
+
+import (
+    "context"
+    "net/http"
+    "net/http/httptest"
+
+    "github.com/distribution/distribution/v3/configuration"
+    "github.com/distribution/distribution/v3/registry/handlers"
+    _ "github.com/distribution/distribution/v3/registry/storage/driver/filesystem" // Driver for persisting docker image data to the filesystem.
+    _ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory"   // Driver for keeping docker image data in memory.
+)
+
+// RunDockerRegistry runs a docker registry as an httptest TLS server on an available port and
+// returns that server; the caller is responsible for closing it.
+// If the rootDir argument isn't empty, the registry is configured to use it as the root directory for persisting image data to the filesystem.
+// If the rootDir argument is empty, the registry is configured to keep image data in memory.
+func RunDockerRegistry(ctx context.Context, rootDir string, middlewares ...func(http.Handler) http.Handler) *httptest.Server {
+    config := &configuration.Configuration{}
+    config.Log.Level = "error"
+
+    if rootDir != "" {
+        config.Storage = map[string]configuration.Parameters{"filesystem": map[string]interface{}{
+            "rootdirectory": rootDir,
+        }}
+    } else {
+        config.Storage = map[string]configuration.Parameters{"inmemory": map[string]interface{}{}}
+    }
+
+    var dockerRegistryApp http.Handler = handlers.NewApp(ctx, config)
+    for _, m := range middlewares {
+        dockerRegistryApp = m(dockerRegistryApp)
+    }
+    server := httptest.NewTLSServer(dockerRegistryApp)
+    return server
+}
diff --git a/staging/operator-registry/pkg/client/client_test.go b/staging/operator-registry/pkg/client/client_test.go
index 2224d194c3..e214afafa9 100644
--- a/staging/operator-registry/pkg/client/client_test.go
+++ b/staging/operator-registry/pkg/client/client_test.go
@@ -20,6 +20,10 @@ type RegistryClientStub struct {
     Error error
 }
 
+func (s *RegistryClientStub) List(ctx context.Context, in *grpc_health_v1.HealthListRequest, opts ...grpc.CallOption) (*grpc_health_v1.HealthListResponse, error) {
+    return nil, nil
+}
+
 func (s *RegistryClientStub) ListPackages(ctx context.Context, in *api.ListPackageRequest, opts ...grpc.CallOption) (api.Registry_ListPackagesClient, error) {
     return nil, nil
 }
diff --git a/staging/operator-registry/pkg/configmap/configmap.go b/staging/operator-registry/pkg/configmap/configmap.go
index 2b310371fa..f6ab07ec3c 100644
--- a/staging/operator-registry/pkg/configmap/configmap.go
+++ b/staging/operator-registry/pkg/configmap/configmap.go
@@ -3,6 +3,8 @@ package configmap
 import (
     "errors"
     "fmt"
+    "maps"
+    "slices"
     "strings"
 
     "github.com/sirupsen/logrus"
@@ -68,7 +70,9 @@ func loadBundle(entry *logrus.Entry, cm *corev1.ConfigMap) (*api.Bundle, map[str
     }
 
     // Add kube resources to the bundle.
-    for name, content := range data {
+    // Sort keys by name to guarantee consistent ordering of bundle objects.
+    for _, name := range slices.Sorted(maps.Keys(data)) {
+        content := data[name]
         reader := strings.NewReader(content)
         logger := entry.WithFields(logrus.Fields{
             "key": name,
diff --git a/staging/operator-registry/pkg/configmap/configmap_test.go b/staging/operator-registry/pkg/configmap/configmap_test.go
index 05e54bd936..f167b74516 100644
--- a/staging/operator-registry/pkg/configmap/configmap_test.go
+++ b/staging/operator-registry/pkg/configmap/configmap_test.go
@@ -62,7 +62,9 @@ func TestLoad(t *testing.T) {
                 unst, err := unstructuredlib.FromString(csvGot)
                 require.NoError(t, err)
-                assert.True(t, unst.GetName() == "first" || unst.GetName() == "second")
+
+                // The last CSV (by lexicographical sort of configmap data keys) always wins.
+ assert.Equal(t, "second", unst.GetName()) }, }, { @@ -167,7 +169,9 @@ func TestLoadWriteRead(t *testing.T) { bundleObjects, err := unstructuredlib.FromBundle(bundle) require.NoError(t, err) - assert.ElementsMatch(t, expectedObjects, bundleObjects) + // Assert that the order of manifests from the original manifests + // directory is preserved (by lexicographical sorting by filename) + assert.Equal(t, expectedObjects, bundleObjects) }) } } diff --git a/staging/operator-registry/pkg/image/containersimageregistry/registry.go b/staging/operator-registry/pkg/image/containersimageregistry/registry.go new file mode 100644 index 0000000000..f3452106d4 --- /dev/null +++ b/staging/operator-registry/pkg/image/containersimageregistry/registry.go @@ -0,0 +1,287 @@ +package containersimageregistry + +import ( + "archive/tar" + "context" + "fmt" + "os" + "path/filepath" + + "github.com/containerd/containerd/archive" + "github.com/containers/common/pkg/auth" + "github.com/containers/image/v5/copy" + "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/oci/layout" + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/pkg/docker/config" + "github.com/containers/image/v5/signature" + "github.com/containers/image/v5/types" + dockerconfig "github.com/docker/cli/cli/config" + "oras.land/oras-go/v2/content/oci" + + orimage "github.com/operator-framework/operator-registry/pkg/image" +) + +var _ orimage.Registry = (*Registry)(nil) + +type Registry struct { + sourceCtx *types.SystemContext + cache *cacheConfig +} + +var DefaultSystemContext = &types.SystemContext{OSChoice: "linux"} + +func New(sourceCtx *types.SystemContext, opts ...Option) (orimage.Registry, error) { + if sourceCtx == nil { + sourceCtx = &types.SystemContext{} + } + reg := &Registry{ + sourceCtx: sourceCtx, + } + + for _, opt := range opts { + if err := opt(reg); err != nil { + return nil, err + } + } + + if reg.cache == nil { + var err error + reg.cache, err = getDefaultImageCache() + if err != nil { + return nil, err + } + } + + return reg, nil +} + +func NewDefault() (orimage.Registry, error) { + return New(DefaultSystemContext) +} + +type cacheConfig struct { + baseDir string + preserve bool +} + +func (c *cacheConfig) ociLayoutDir() string { + return filepath.Join(c.baseDir, "oci-layout") +} +func (c *cacheConfig) blobInfoCacheDir() string { + return filepath.Join(c.baseDir, "blob-info-cache") +} + +func (c *cacheConfig) getSystemContext() *types.SystemContext { + return &types.SystemContext{ + BlobInfoCacheDir: c.blobInfoCacheDir(), + } +} + +type Option func(*Registry) error + +func getDefaultImageCache() (*cacheConfig, error) { + if dir := os.Getenv("OLM_CACHE_DIR"); dir != "" { + return newCacheConfig(filepath.Join(dir, "images"), true), nil + } + return getTemporaryImageCache() +} + +func getTemporaryImageCache() (*cacheConfig, error) { + tmpDir, err := os.MkdirTemp("", "opm-containers-image-cache-") + if err != nil { + return nil, err + } + return newCacheConfig(tmpDir, false), nil +} + +func newCacheConfig(dir string, preserve bool) *cacheConfig { + return &cacheConfig{ + baseDir: dir, + preserve: preserve, + } +} + +func WithTemporaryImageCache() Option { + return func(r *Registry) error { + var err error + r.cache, err = getTemporaryImageCache() + if err != nil { + return err + } + return nil + } +} + +func WithInsecureSkipTLSVerify(insecureSkipTLSVerify bool) Option { + return func(r 
*Registry) error { + r.sourceCtx.DockerDaemonInsecureSkipTLSVerify = insecureSkipTLSVerify + r.sourceCtx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(insecureSkipTLSVerify) + r.sourceCtx.OCIInsecureSkipTLSVerify = insecureSkipTLSVerify + return nil + } +} + +func (r *Registry) Pull(ctx context.Context, ref orimage.Reference) error { + namedRef, err := reference.ParseNamed(ref.String()) + if err != nil { + return err + } + dockerRef, err := docker.NewReference(namedRef) + if err != nil { + return err + } + + if err := os.MkdirAll(r.cache.ociLayoutDir(), 0700); err != nil { + return err + } + ociLayoutRef, err := layout.NewReference(r.cache.ociLayoutDir(), ref.String()) + if err != nil { + return err + } + + policy, err := signature.DefaultPolicy(r.sourceCtx) + if err != nil { + return err + } + policyContext, err := signature.NewPolicyContext(policy) + if err != nil { + return err + } + + sourceCtx := r.sourceCtx + authFile := getAuthFile(r.sourceCtx, namedRef.String()) + if authFile != "" { + sourceCtx.AuthFilePath = authFile + } + + if _, err := copy.Image(ctx, policyContext, ociLayoutRef, dockerRef, &copy.Options{ + SourceCtx: sourceCtx, + DestinationCtx: r.cache.getSystemContext(), + OptimizeDestinationImageAlreadyExists: true, + }); err != nil { + return err + } + return nil +} + +func (r *Registry) Unpack(ctx context.Context, ref orimage.Reference, unpackDir string) error { + ociLayoutRef, err := layout.NewReference(r.cache.ociLayoutDir(), ref.String()) + if err != nil { + return fmt.Errorf("could not create oci layout reference: %w", err) + } + + ociLayoutCtx := r.cache.getSystemContext() + imageSource, err := ociLayoutRef.NewImageSource(ctx, ociLayoutCtx) + if err != nil { + return fmt.Errorf("failed to create oci image source: %v", err) + } + defer imageSource.Close() + + img, err := image.FromSource(ctx, ociLayoutCtx, imageSource) + if err != nil { + return fmt.Errorf("could not get image from oci image source: %v", err) + } + + if err := os.MkdirAll(unpackDir, 0700); err != nil { + return err + } + + for _, info := range img.LayerInfos() { + if err := func() error { + layer, _, err := imageSource.GetBlob(ctx, info, nil) + if err != nil { + return fmt.Errorf("failed to get blob: %v", err) + } + defer layer.Close() + + decompressed, _, err := compression.AutoDecompress(layer) + if err != nil { + return fmt.Errorf("failed to decompress layer: %v", err) + } + + if _, err := archive.Apply(ctx, unpackDir, decompressed, archive.WithFilter(func(th *tar.Header) (bool, error) { + th.PAXRecords = nil + th.Xattrs = nil //nolint:staticcheck + th.Uid = os.Getuid() + th.Gid = os.Getgid() + th.Mode = 0600 + if th.FileInfo().IsDir() { + th.Mode = 0700 + } + return true, nil + })); err != nil { + return fmt.Errorf("failed to apply layer: %v", err) + } + return nil + }(); err != nil { + return err + } + } + return nil +} + +func (r *Registry) Labels(ctx context.Context, ref orimage.Reference) (map[string]string, error) { + ociLayoutRef, err := layout.NewReference(r.cache.ociLayoutDir(), ref.String()) + if err != nil { + return nil, fmt.Errorf("could not create oci layout reference: %w", err) + } + + ociLayoutCtx := r.cache.getSystemContext() + img, err := ociLayoutRef.NewImage(ctx, ociLayoutCtx) + if err != nil { + return nil, fmt.Errorf("could not load image from oci image reference: %v", err) + } + imgConfig, err := img.OCIConfig(ctx) + if err != nil { + return nil, fmt.Errorf("could not get oci config from image: %v", err) + } + return imgConfig.Config.Labels, nil +} + +func (r *Registry) 
Destroy() error { + if !r.cache.preserve { + return os.RemoveAll(r.cache.baseDir) + } + + store, err := oci.NewWithContext(context.TODO(), r.cache.ociLayoutDir()) + if err != nil { + return fmt.Errorf("open cache for garbage collection: %v", err) + } + if err := store.GC(context.TODO()); err != nil { + return fmt.Errorf("garbage collection failed: %v", err) + } + return nil +} + +// This is a slight variation on the auth.GetDefaultAuthFile function provided by containers/image. +// The reason for this variation is so that this image registry implementation can be used as a drop-in +// replacement for our existing containerd-based image registry client, and remain compatible with current +// behavior. +func getAuthFile(sourceCtx *types.SystemContext, ref string) string { + // By default, we will use the docker config file in the standard docker config directory. + // However, if REGISTRY_AUTH_FILE or DOCKER_CONFIG environment variables are set, we will + // use those (in that order) instead to derive the auth config file. + authFile := filepath.Join(dockerconfig.Dir(), dockerconfig.ConfigFileName) + if defaultAuthFile := auth.GetDefaultAuthFile(); defaultAuthFile != "" { + authFile = defaultAuthFile + } + + // In order to maintain backward-compatibility with the original credential getter from + // the containerd registry implementation, we will first try to get the credentials from + // the auth config file we derived above, if it exists. If we find a matching credential + // in this file, we'll use this file. + if stat, statErr := os.Stat(authFile); statErr == nil && stat.Mode().IsRegular() { + if _, err := config.GetCredentials(&types.SystemContext{AuthFilePath: authFile}, ref); err == nil { + return authFile + } + } + // If the auth file was unset, doesn't exist, or if we couldn't find credentials in it, + // we'll use system defaults from containers/image (podman/skopeo) to lookup the credentials. + if sourceCtx != nil && sourceCtx.AuthFilePath != "" { + return sourceCtx.AuthFilePath + } + return "" +} diff --git a/staging/operator-registry/pkg/image/registry_test.go b/staging/operator-registry/pkg/image/registry_test.go index ac1df7ca4a..f4de785643 100644 --- a/staging/operator-registry/pkg/image/registry_test.go +++ b/staging/operator-registry/pkg/image/registry_test.go @@ -4,53 +4,107 @@ import ( "context" "crypto/rand" "crypto/x509" + "encoding/pem" "errors" "fmt" "io" "math" "math/big" "net/http" + "net/url" "os" + "path/filepath" "sync" "testing" - distribution "github.com/distribution/distribution/v3" - "github.com/distribution/distribution/v3/configuration" - repositorymiddleware "github.com/distribution/distribution/v3/registry/middleware/repository" + "github.com/containers/image/v5/types" + "github.com/distribution/distribution/v3" "github.com/distribution/reference" "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" "golang.org/x/mod/sumdb/dirhash" + libimage "github.com/operator-framework/operator-registry/internal/testutil/image" "github.com/operator-framework/operator-registry/pkg/image" "github.com/operator-framework/operator-registry/pkg/image/containerdregistry" - libimage "github.com/operator-framework/operator-registry/pkg/lib/image" + "github.com/operator-framework/operator-registry/pkg/image/containersimageregistry" ) // cleanupFunc is a function that cleans up after some test infra. type cleanupFunc func() // newRegistryFunc is a function that creates and returns a new image.Registry to test its cleanupFunc. 
-type newRegistryFunc func(t *testing.T, cafile string) (image.Registry, cleanupFunc) +type newRegistryFunc func(t *testing.T, serverCert *x509.Certificate) (image.Registry, cleanupFunc) -func poolForCertFile(t *testing.T, file string) *x509.CertPool { - rootCAs := x509.NewCertPool() - certs, err := os.ReadFile(file) +func caDirForCert(t *testing.T, serverCert *x509.Certificate) string { + caDir := t.TempDir() + caFile, err := os.Create(filepath.Join(caDir, "ca.crt")) + require.NoError(t, err) + + require.NoError(t, pem.Encode(caFile, &pem.Block{ + Type: "CERTIFICATE", + Bytes: serverCert.Raw, + })) + require.NoError(t, caFile.Close()) + return caDir +} + +const insecureSignaturePolicy = `{ + "default": [ + { + "type": "insecureAcceptAnything" + } + ], + "transports": + { + "docker-daemon": + { + "": [{"type":"insecureAcceptAnything"}] + } + } +}` + +func createSignaturePolicyFile(t *testing.T) string { + policyDir := t.TempDir() + policyFilePath := filepath.Join(policyDir, "policy.json") + err := os.WriteFile(policyFilePath, []byte(insecureSignaturePolicy), 0600) require.NoError(t, err) - require.True(t, rootCAs.AppendCertsFromPEM(certs)) + return policyFilePath +} + +func poolForCert(serverCert *x509.Certificate) *x509.CertPool { + rootCAs := x509.NewCertPool() + rootCAs.AddCert(serverCert) return rootCAs } func TestRegistries(t *testing.T) { registries := map[string]newRegistryFunc{ - "containerd": func(t *testing.T, cafile string) (image.Registry, cleanupFunc) { + "containersimage": func(t *testing.T, serverCert *x509.Certificate) (image.Registry, cleanupFunc) { + caDir := caDirForCert(t, serverCert) + policyFile := createSignaturePolicyFile(t) + sourceCtx := &types.SystemContext{ + OCICertPath: caDir, + DockerCertPath: caDir, + DockerPerHostCertDirPath: caDir, + SignaturePolicyPath: policyFile, + } + r, err := containersimageregistry.New(sourceCtx, containersimageregistry.WithTemporaryImageCache()) + require.NoError(t, err) + cleanup := func() { + require.NoError(t, os.RemoveAll(caDir)) + require.NoError(t, r.Destroy()) + } + return r, cleanup + }, + "containerd": func(t *testing.T, serverCert *x509.Certificate) (image.Registry, cleanupFunc) { val, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) require.NoError(t, err) r, err := containerdregistry.NewRegistry( containerdregistry.WithLog(logrus.New().WithField("test", t.Name())), containerdregistry.WithCacheDir(fmt.Sprintf("cache-%x", val)), - containerdregistry.WithRootCAs(poolForCertFile(t, cafile)), + containerdregistry.WithRootCAs(poolForCert(serverCert)), ) require.NoError(t, err) cleanup := func() { @@ -59,30 +113,6 @@ func TestRegistries(t *testing.T) { return r, cleanup }, - // TODO: enable docker tests - currently blocked on a cross-platform way to configure either insecure registries - // or CA certs - //"docker": func(t *testing.T, cafile string) (image.Registry, cleanupFunc) { - // r, err := execregistry.NewRegistry(containertools.DockerTool, - // logrus.New().WithField("test", t.Name()), - // cafile, - // ) - // require.NoError(t, err) - // cleanup := func() { - // require.NoError(t, r.Destroy()) - // } - // - // return r, cleanup - //}, - // TODO: Enable buildah tests - // func(t *testing.T) image.Registry { - // r, err := buildahregistry.NewRegistry( - // buildahregistry.WithLog(logrus.New().WithField("test", t.Name())), - // buildahregistry.WithCacheDir(fmt.Sprintf("cache-%x", rand.Int())), - // ) - // require.NoError(t, err) - - // return r - // }, } for name, registry := range registries { @@ -90,6 +120,18 @@ 
func TestRegistries(t *testing.T) { } } +type httpError struct { + statusCode int + error error +} + +func (e *httpError) Error() string { + if e.error != nil { + return e.error.Error() + } + return http.StatusText(e.statusCode) +} + func testPullAndUnpack(t *testing.T, name string, newRegistry newRegistryFunc) { type args struct { dockerRootDir string @@ -100,7 +142,18 @@ func testPullAndUnpack(t *testing.T, name string, newRegistry newRegistryFunc) { type expected struct { checksum string pullAssertion require.ErrorAssertionFunc + labels map[string]string + } + + expectedLabels := map[string]string{ + "operators.operatorframework.io.bundle.mediatype.v1": "registry+v1", + "operators.operatorframework.io.bundle.manifests.v1": "manifests/", + "operators.operatorframework.io.bundle.metadata.v1": "metadata/", + "operators.operatorframework.io.bundle.package.v1": "kiali", + "operators.operatorframework.io.bundle.channels.v1": "stable,alpha", + "operators.operatorframework.io.bundle.channel.default.v1": "stable", } + tests := []struct { description string args args @@ -114,6 +167,7 @@ func testPullAndUnpack(t *testing.T, name string, newRegistry newRegistryFunc) { }, expected: expected{ checksum: dirChecksum(t, "testdata/golden/bundles/kiali"), + labels: expectedLabels, pullAssertion: require.NoError, }, }, @@ -125,6 +179,7 @@ func testPullAndUnpack(t *testing.T, name string, newRegistry newRegistryFunc) { }, expected: expected{ checksum: dirChecksum(t, "testdata/golden/bundles/kiali"), + labels: expectedLabels, pullAssertion: require.NoError, }, }, @@ -134,10 +189,11 @@ func testPullAndUnpack(t *testing.T, name string, newRegistry newRegistryFunc) { dockerRootDir: "testdata/golden", img: "/olmtest/kiali:1.4.2", pullErrCount: 1, - pullErr: errors.New("dummy"), + pullErr: &httpError{statusCode: http.StatusTooManyRequests}, }, expected: expected{ checksum: dirChecksum(t, "testdata/golden/bundles/kiali"), + labels: expectedLabels, pullAssertion: require.NoError, }, }, @@ -158,7 +214,7 @@ func testPullAndUnpack(t *testing.T, name string, newRegistry newRegistryFunc) { dockerRootDir: "testdata/golden", img: "/olmtest/kiali:1.4.2", pullErrCount: math.MaxInt64, - pullErr: errors.New("dummy"), + pullErr: &httpError{statusCode: http.StatusTooManyRequests}, }, expected: expected{ pullAssertion: require.Error, @@ -168,51 +224,43 @@ func testPullAndUnpack(t *testing.T, name string, newRegistry newRegistryFunc) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) - ctx, close := context.WithCancel(context.Background()) - defer close() - - configOpts := []libimage.ConfigOpt{} + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var middlewares []func(next http.Handler) http.Handler if tt.args.pullErrCount > 0 { - configOpts = append(configOpts, func(config *configuration.Configuration) { - if config.Middleware == nil { - config.Middleware = make(map[string][]configuration.Middleware) - } - - mockRepo := &mockRepo{blobStore: &mockBlobStore{ - maxCount: tt.args.pullErrCount, - err: tt.args.pullErr, - }} - val, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) - require.NoError(t, err) - - middlewareName := fmt.Sprintf("test-%x", val) - require.NoError(t, repositorymiddleware.Register(middlewareName, mockRepo.init)) - config.Middleware["repository"] = append(config.Middleware["repository"], configuration.Middleware{ - Name: middlewareName, - }) - }) + middlewares = append(middlewares, failureMiddleware(tt.args.pullErrCount, 
tt.args.pullErr)) } - host, cafile, err := libimage.RunDockerRegistry(ctx, tt.args.dockerRootDir, configOpts...) - require.NoError(t, err) + dockerServer := libimage.RunDockerRegistry(ctx, tt.args.dockerRootDir, middlewares...) + defer dockerServer.Close() - r, cleanup := newRegistry(t, cafile) + r, cleanup := newRegistry(t, dockerServer.Certificate()) defer cleanup() - ref := image.SimpleReference(host + tt.args.img) - tt.expected.pullAssertion(t, r.Pull(ctx, ref)) + url, err := url.Parse(dockerServer.URL) + require.NoError(t, err) - if tt.expected.checksum != "" { - // Copy golden manifests to a temp dir - dir := "kiali-unpacked" - require.NoError(t, r.Unpack(ctx, ref, dir)) + ref := image.SimpleReference(fmt.Sprintf("%s%s", url.Host, tt.args.img)) + t.Log("pulling image", ref) + pullErr := r.Pull(ctx, ref) + tt.expected.pullAssertion(t, pullErr) + if pullErr != nil { + return + } - checksum := dirChecksum(t, dir) - require.Equal(t, tt.expected.checksum, checksum) + labels, err := r.Labels(ctx, ref) + require.NoError(t, err) + require.Equal(t, tt.expected.labels, labels) - require.NoError(t, os.RemoveAll(dir)) - } + // Copy golden manifests to a temp dir + dir := "kiali-unpacked" + require.NoError(t, r.Unpack(ctx, ref, dir)) + + checksum := dirChecksum(t, dir) + require.Equal(t, tt.expected.checksum, checksum) + + require.NoError(t, os.RemoveAll(dir)) }) } } @@ -303,3 +351,24 @@ func (f *mockBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r func (f *mockBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { return f.base.Delete(ctx, dgst) } + +func failureMiddleware(totalCount int, err error) func(next http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + count := 0 + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if count >= totalCount { + next.ServeHTTP(w, r) + return + } + count++ + statusCode := http.StatusInternalServerError + + var httpErr *httpError + if errors.As(err, &httpErr) { + statusCode = httpErr.statusCode + } + + http.Error(w, err.Error(), statusCode) + }) + } +} diff --git a/staging/operator-registry/pkg/lib/bundle/supported_resources.go b/staging/operator-registry/pkg/lib/bundle/supported_resources.go index 94b5fd01df..b9440018f8 100644 --- a/staging/operator-registry/pkg/lib/bundle/supported_resources.go +++ b/staging/operator-registry/pkg/lib/bundle/supported_resources.go @@ -20,6 +20,7 @@ const ( ConsoleQuickStartKind = "ConsoleQuickStart" ConsoleCLIDownloadKind = "ConsoleCLIDownload" ConsoleLinkKind = "ConsoleLink" + ConsolePlugin = "ConsolePlugin" ) // Namespaced indicates whether the resource is namespace scoped (true) or cluster-scoped (false). 
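The next hunk registers the new kind in `supportedResources`. As a review aid, here is a minimal sketch of what a lookup against that map means after this change; `lookupKind` is a hypothetical helper written only to illustrate the map's semantics and is not a function from this diff:

```go
// Hypothetical illustration (not part of this diff): supportedResources maps
// a kind name to its Namespaced flag. Presence in the map means the kind is
// OLM-supported; Namespaced(false) means the resource is cluster-scoped.
func lookupKind(kind string) (supported bool, namespaced Namespaced) {
	namespaced, supported = supportedResources[kind]
	return supported, namespaced
}

// After this change, lookupKind("ConsolePlugin") returns (true, false):
// supported and cluster-scoped, like the other console kinds.
```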
@@ -47,6 +48,7 @@ var supportedResources = map[string]Namespaced{ ConsoleQuickStartKind: false, ConsoleCLIDownloadKind: false, ConsoleLinkKind: false, + ConsolePlugin: false, } // IsSupported checks if the object kind is OLM-supported and if it is namespaced diff --git a/staging/operator-registry/pkg/lib/image/registry.go b/staging/operator-registry/pkg/lib/image/registry.go deleted file mode 100644 index c535379ae8..0000000000 --- a/staging/operator-registry/pkg/lib/image/registry.go +++ /dev/null @@ -1,218 +0,0 @@ -package image - -import ( - "bytes" - "context" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "fmt" - "io" - "math/big" - "net" - "net/http" - "os" - "time" - - "github.com/distribution/distribution/v3/configuration" - "github.com/distribution/distribution/v3/registry" - _ "github.com/distribution/distribution/v3/registry/storage/driver/filesystem" // Driver for persisting docker image data to the filesystem. - _ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory" // Driver for keeping docker image data in memory. - "github.com/phayes/freeport" - "k8s.io/apimachinery/pkg/util/wait" -) - -type ConfigOpt func(*configuration.Configuration) - -// RunDockerRegistry runs a docker registry on an available port and returns its host string if successful, otherwise it returns an error. -// If the rootDir argument isn't empty, the registry is configured to use this as the root directory for persisting image data to the filesystem. -// If the rootDir argument is empty, the registry is configured to keep image data in memory. -func RunDockerRegistry(ctx context.Context, rootDir string, configOpts ...ConfigOpt) (string, string, error) { - dockerPort, err := freeport.GetFreePort() - if err != nil { - return "", "", err - } - host := fmt.Sprintf("localhost:%d", dockerPort) - certPool := x509.NewCertPool() - - cafile, err := os.CreateTemp("", "ca") - if err != nil { - return "", "", err - } - certfile, err := os.CreateTemp("", "cert") - if err != nil { - return "", "", err - } - keyfile, err := os.CreateTemp("", "key") - if err != nil { - return "", "", err - } - if err := GenerateCerts(cafile, certfile, keyfile, certPool); err != nil { - return "", "", err - } - if err := cafile.Close(); err != nil { - return "", "", err - } - if err := certfile.Close(); err != nil { - return "", "", err - } - if err := keyfile.Close(); err != nil { - return "", "", err - } - - config := &configuration.Configuration{} - config.HTTP.Addr = host - config.HTTP.TLS.Certificate = certfile.Name() - config.HTTP.TLS.Key = keyfile.Name() - config.Log.Level = "error" - - if rootDir != "" { - config.Storage = map[string]configuration.Parameters{"filesystem": map[string]interface{}{ - "rootdirectory": rootDir, - }} - } else { - config.Storage = map[string]configuration.Parameters{"inmemory": map[string]interface{}{}} - } - config.HTTP.DrainTimeout = 2 * time.Second - - for _, opt := range configOpts { - opt(config) - } - - dockerRegistry, err := registry.NewRegistry(ctx, config) - if err != nil { - return "", "", err - } - - go func() { - defer func() { - os.Remove(cafile.Name()) - os.Remove(certfile.Name()) - os.Remove(keyfile.Name()) - }() - if err := dockerRegistry.ListenAndServe(); err != nil { - panic(fmt.Errorf("docker registry stopped listening: %v", err)) - } - }() - - // nolint:staticcheck - err = wait.Poll(100*time.Millisecond, 10*time.Second, func() (bool, error) { - tr := &http.Transport{TLSClientConfig: 
&tls.Config{ - InsecureSkipVerify: false, - RootCAs: certPool, - MinVersion: tls.VersionTLS12, - }} - client := &http.Client{Transport: tr} - r, err := client.Get("https://" + host + "/v2/") - if err != nil { - return false, nil - } - defer r.Body.Close() - if r.StatusCode == http.StatusOK { - return true, nil - } - return false, nil - }) - if err != nil { - return "", "", err - } - - // Return the registry host string - return host, cafile.Name(), nil -} - -func certToPem(der []byte) ([]byte, error) { - out := &bytes.Buffer{} - if err := pem.Encode(out, &pem.Block{Type: "CERTIFICATE", Bytes: der}); err != nil { - return nil, err - } - return out.Bytes(), nil -} - -func keyToPem(key *ecdsa.PrivateKey) ([]byte, error) { - b, err := x509.MarshalECPrivateKey(key) - if err != nil { - return nil, fmt.Errorf("unable to marshal private key: %v", err) - } - out := &bytes.Buffer{} - if err := pem.Encode(out, &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}); err != nil { - return nil, err - } - return out.Bytes(), nil -} - -func GenerateCerts(caWriter, certWriter, keyWriter io.Writer, pool *x509.CertPool) error { - priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) - if err != nil { - return err - } - ca := x509.Certificate{ - SerialNumber: big.NewInt(1), - Subject: pkix.Name{ - Organization: []string{"test ca"}, - }, - NotBefore: time.Now(), - NotAfter: time.Now().Add(time.Hour), - - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - BasicConstraintsValid: true, - IsCA: true, - } - cert := x509.Certificate{ - SerialNumber: big.NewInt(2), - Subject: pkix.Name{ - Organization: []string{"test cert"}, - }, - NotBefore: time.Now(), - NotAfter: time.Now().Add(time.Hour), - - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - BasicConstraintsValid: true, - IPAddresses: []net.IP{ - net.ParseIP("127.0.0.1"), - net.ParseIP("::1"), - }, - DNSNames: []string{"localhost"}, - } - - caBytes, err := x509.CreateCertificate(rand.Reader, &ca, &ca, &priv.PublicKey, priv) - if err != nil { - return err - } - - caFile, err := certToPem(caBytes) - if err != nil { - return err - } - if _, err := caWriter.Write(caFile); err != nil { - return err - } - pool.AppendCertsFromPEM(caFile) - - certBytes, err := x509.CreateCertificate(rand.Reader, &cert, &ca, &priv.PublicKey, priv) - if err != nil { - return err - } - certFile, err := certToPem(certBytes) - if err != nil { - return err - } - if _, err := certWriter.Write(certFile); err != nil { - return err - } - - keyFile, err := keyToPem(priv) - if err != nil { - return err - } - if _, err := keyWriter.Write(keyFile); err != nil { - return err - } - return nil -} diff --git a/staging/operator-registry/release/goreleaser.darwin.yaml b/staging/operator-registry/release/goreleaser.darwin.yaml index b600710406..d0d47ff79b 100644 --- a/staging/operator-registry/release/goreleaser.darwin.yaml +++ b/staging/operator-registry/release/goreleaser.darwin.yaml @@ -10,7 +10,7 @@ builds: - CGO_ENABLED=1 mod_timestamp: "{{ .CommitTimestamp }}" flags: &build-flags - - -tags=json1 + - -tags=json1,containers_image_openpgp asmflags: &build-asmflags - all=-trimpath={{ .Env.PWD }} gcflags: &build-gcflags diff --git a/staging/operator-registry/release/goreleaser.linux.yaml b/staging/operator-registry/release/goreleaser.linux.yaml index 2f69f8dd57..2822d1d302 100644 --- 
a/staging/operator-registry/release/goreleaser.linux.yaml +++ b/staging/operator-registry/release/goreleaser.linux.yaml @@ -11,7 +11,7 @@ builds: - CGO_ENABLED=1 mod_timestamp: "{{ .CommitTimestamp }}" flags: &build-flags - - -tags=json1,netgo,osusergo + - -tags=json1,netgo,osusergo,containers_image_openpgp asmflags: &build-asmflags - all=-trimpath={{ .Env.PWD }} gcflags: &build-gcflags diff --git a/staging/operator-registry/release/goreleaser.windows.yaml b/staging/operator-registry/release/goreleaser.windows.yaml index 4232f5eeac..b35be4c7aa 100644 --- a/staging/operator-registry/release/goreleaser.windows.yaml +++ b/staging/operator-registry/release/goreleaser.windows.yaml @@ -10,7 +10,7 @@ builds: - CGO_ENABLED=1 mod_timestamp: "{{ .CommitTimestamp }}" flags: &build-flags - - -tags=json1,netgo,osusergo + - -tags=json1,netgo,osusergo,containers_image_openpgp asmflags: &build-asmflags - all=-trimpath={{ .Env.PWD }} gcflags: &build-gcflags diff --git a/staging/operator-registry/test/e2e/opm_bundle_test.go b/staging/operator-registry/test/e2e/opm_bundle_test.go index b1161fea69..f761261e83 100644 --- a/staging/operator-registry/test/e2e/opm_bundle_test.go +++ b/staging/operator-registry/test/e2e/opm_bundle_test.go @@ -3,7 +3,10 @@ package e2e_test import ( "bytes" "context" + "encoding/pem" "io" + "net/http/httptest" + "net/url" "os" "path/filepath" @@ -12,7 +15,7 @@ import ( "golang.org/x/mod/sumdb/dirhash" - libimage "github.com/operator-framework/operator-registry/pkg/lib/image" + libimage "github.com/operator-framework/operator-registry/internal/testutil/image" ) var _ = Describe("opm alpha bundle", func() { @@ -32,29 +35,34 @@ var _ = Describe("opm alpha bundle", func() { bundleChecksum string tmpDir string rootCA string - stopRegistry func() + server *httptest.Server ) BeforeEach(func() { ctx, cancel := context.WithCancel(context.Background()) - stopRegistry = func() { - cancel() - <-ctx.Done() - } + defer cancel() // Spin up an in-process docker registry with a set of preconfigured test images var ( // Directory containing the docker registry filesystem goldenFiles = "../../pkg/image/testdata/golden" - - host string - err error ) - host, rootCA, err = libimage.RunDockerRegistry(ctx, goldenFiles) - Expect(err).ToNot(HaveOccurred()) + server = libimage.RunDockerRegistry(ctx, goldenFiles) + serverURL, err := url.Parse(server.URL) + Expect(err).NotTo(HaveOccurred()) + + caFile, err := os.CreateTemp("", "ca") + Expect(err).NotTo(HaveOccurred()) + + Expect(pem.Encode(caFile, &pem.Block{ + Type: "CERTIFICATE", + Bytes: server.Certificate().Raw, + })).To(Succeed()) + rootCA = caFile.Name() + caFile.Close() // Create a bundle ref using the local registry host name and the namespace/name of a bundle we already know the content of - bundleRef = host + "/" + imageDomain + "/kiali@sha256:a1bec450c104ceddbb25b252275eb59f1f1e6ca68e0ced76462042f72f7057d8" + bundleRef = serverURL.Host + "/" + imageDomain + "/kiali@sha256:a1bec450c104ceddbb25b252275eb59f1f1e6ca68e0ced76462042f72f7057d8" // Generate a checksum of the expected content for the bundle under test bundleChecksum, err = dirhash.HashDir(filepath.Join(goldenFiles, "bundles/kiali"), "", dirhash.DefaultHash) @@ -66,12 +74,13 @@ }) AfterEach(func() { - stopRegistry() + server.Close() if CurrentGinkgoTestDescription().Failed { // Skip additional cleanup return } + Expect(os.RemoveAll(rootCA)).To(Succeed()) Expect(os.RemoveAll(tmpDir)).To(Succeed()) }) diff --git a/vendor/cel.dev/expr/README.md 
b/vendor/cel.dev/expr/README.md index 7930c0b755..42d67f87c2 100644 --- a/vendor/cel.dev/expr/README.md +++ b/vendor/cel.dev/expr/README.md @@ -69,5 +69,3 @@ For more detail, see: * [Language Definition](doc/langdef.md) Released under the [Apache License](LICENSE). - -Disclaimer: This is not an official Google product. diff --git a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go index 593b10ab69..194d5e9c94 100644 --- a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go +++ b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go @@ -11,21 +11,13 @@ func (oscState oscStringState) Handle(b byte) (s state, e error) { return nextState, err } - switch { - case isOscStringTerminator(b): + // There are several control characters and sequences which can + // terminate an OSC string. Most of them are handled by the baseState + // handler. The ANSI_BEL character is a special case which behaves as a + // terminator only for an OSC string. + if b == ANSI_BEL { return oscState.parser.ground, nil } return oscState, nil } - -// See below for OSC string terminators for linux -// http://man7.org/linux/man-pages/man4/console_codes.4.html -func isOscStringTerminator(b byte) bool { - - if b == ANSI_BEL || b == 0x5C { - return true - } - - return false -} diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md index 639e6c3998..235496eeb2 100644 --- a/vendor/github.com/BurntSushi/toml/README.md +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -3,7 +3,7 @@ reflection interface similar to Go's standard library `json` and `xml` packages. Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). -Documentation: https://godocs.io/github.com/BurntSushi/toml +Documentation: https://pkg.go.dev/github.com/BurntSushi/toml See the [releases page](https://github.com/BurntSushi/toml/releases) for a changelog; this information is also in the git tag annotations (e.g. `git show diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go index 7aaf462c94..3fa516caa2 100644 --- a/vendor/github.com/BurntSushi/toml/decode.go +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -196,6 +196,19 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error { return md.unify(primValue.undecoded, rvalue(v)) } +// markDecodedRecursive is a helper to mark any key under the given tmap as +// decoded, recursing as needed +func markDecodedRecursive(md *MetaData, tmap map[string]any) { + for key := range tmap { + md.decoded[md.context.add(key).String()] = struct{}{} + if tmap, ok := tmap[key].(map[string]any); ok { + md.context = append(md.context, key) + markDecodedRecursive(md, tmap) + md.context = md.context[0 : len(md.context)-1] + } + } +} + // unify performs a sort of type unification based on the structure of `rv`, // which is the client representation. // @@ -222,6 +235,16 @@ func (md *MetaData) unify(data any, rv reflect.Value) error { if err != nil { return md.parseErr(err) } + // Assume the Unmarshaler decoded everything, so mark all keys under + // this table as decoded. 
+ if tmap, ok := data.(map[string]any); ok { + markDecodedRecursive(md, tmap) + } + if aot, ok := data.([]map[string]any); ok { + for _, tmap := range aot { + markDecodedRecursive(md, tmap) + } + } return nil } if v, ok := rvi.(encoding.TextUnmarshaler); ok { @@ -540,12 +563,14 @@ func (md *MetaData) badtype(dst string, data any) error { func (md *MetaData) parseErr(err error) error { k := md.context.String() + d := string(md.data) return ParseError{ + Message: err.Error(), + err: err, LastKey: k, - Position: md.keyInfo[k].pos, + Position: md.keyInfo[k].pos.withCol(d), Line: md.keyInfo[k].pos.Line, - err: err, - input: string(md.data), + input: d, } } diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go index 73366c0d9a..ac196e7df8 100644 --- a/vendor/github.com/BurntSushi/toml/encode.go +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -402,31 +402,30 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { // Sort keys so that we have deterministic output. And write keys directly // underneath this key first, before writing sub-structs or sub-maps. - var mapKeysDirect, mapKeysSub []string + var mapKeysDirect, mapKeysSub []reflect.Value for _, mapKey := range rv.MapKeys() { - k := mapKey.String() if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) { - mapKeysSub = append(mapKeysSub, k) + mapKeysSub = append(mapKeysSub, mapKey) } else { - mapKeysDirect = append(mapKeysDirect, k) + mapKeysDirect = append(mapKeysDirect, mapKey) } } - var writeMapKeys = func(mapKeys []string, trailC bool) { - sort.Strings(mapKeys) + writeMapKeys := func(mapKeys []reflect.Value, trailC bool) { + sort.Slice(mapKeys, func(i, j int) bool { return mapKeys[i].String() < mapKeys[j].String() }) for i, mapKey := range mapKeys { - val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey))) + val := eindirect(rv.MapIndex(mapKey)) if isNil(val) { continue } if inline { - enc.writeKeyValue(Key{mapKey}, val, true) + enc.writeKeyValue(Key{mapKey.String()}, val, true) if trailC || i != len(mapKeys)-1 { enc.wf(", ") } } else { - enc.encode(key.add(mapKey), val) + enc.encode(key.add(mapKey.String()), val) } } } @@ -441,8 +440,6 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { } } -const is32Bit = (32 << (^uint(0) >> 63)) == 32 - func pointerTo(t reflect.Type) reflect.Type { if t.Kind() == reflect.Ptr { return pointerTo(t.Elem()) @@ -477,15 +474,14 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { frv := eindirect(rv.Field(i)) - if is32Bit { - // Copy so it works correct on 32bit archs; not clear why this - // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4 - // This also works fine on 64bit, but 32bit archs are somewhat - // rare and this is a wee bit faster. - copyStart := make([]int, len(start)) - copy(copyStart, start) - start = copyStart - } + // Need to make a copy because ... ehm, I don't know why... I guess + // allocating a new array can cause it to fail(?) + // + // Done for: https://github.com/BurntSushi/toml/issues/430 + // Previously only on 32bit for: https://github.com/BurntSushi/toml/issues/314 + copyStart := make([]int, len(start)) + copy(copyStart, start) + start = copyStart // Treat anonymous struct fields with tag names as though they are // not anonymous, like encoding/json does. 
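Stepping out of the encoder hunks for a moment: the effect of the `markDecodedRecursive` change in decode.go above is easiest to see through toml's public API. A small sketch, assuming the vendored code behaves like the upstream release it was pulled from; the `rawTable` type is invented purely for illustration:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// rawTable consumes a whole TOML table via the toml.Unmarshaler interface.
type rawTable struct{ data map[string]any }

func (r *rawTable) UnmarshalTOML(v any) error {
	r.data, _ = v.(map[string]any)
	return nil
}

type config struct {
	Server rawTable `toml:"server"`
}

func main() {
	var c config
	md, err := toml.Decode("[server]\nhost = \"localhost\"\nport = 8080\n", &c)
	if err != nil {
		panic(err)
	}
	// Previously, keys consumed by an Unmarshaler were still reported as
	// undecoded; with markDecodedRecursive, server.host and server.port
	// are now marked decoded, so this prints an empty list.
	fmt.Println(md.Undecoded())
}
```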
@@ -507,7 +503,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { } addFields(rt, rv, nil) - writeFields := func(fields [][]int) { + writeFields := func(fields [][]int, totalFields int) { for _, fieldIndex := range fields { fieldType := rt.FieldByIndex(fieldIndex) fieldVal := rv.FieldByIndex(fieldIndex) @@ -537,7 +533,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { if inline { enc.writeKeyValue(Key{keyName}, fieldVal, true) - if fieldIndex[0] != len(fields)-1 { + if fieldIndex[0] != totalFields-1 { enc.wf(", ") } } else { @@ -549,8 +545,10 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { if inline { enc.wf("{") } - writeFields(fieldsDirect) - writeFields(fieldsSub) + + l := len(fieldsDirect) + len(fieldsSub) + writeFields(fieldsDirect, l) + writeFields(fieldsSub, l) if inline { enc.wf("}") } diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go index b45a3f45f6..b7077d3ae3 100644 --- a/vendor/github.com/BurntSushi/toml/error.go +++ b/vendor/github.com/BurntSushi/toml/error.go @@ -67,21 +67,36 @@ type ParseError struct { // Position of an error. type Position struct { Line int // Line number, starting at 1. + Col int // Error column, starting at 1. Start int // Start of error, as byte offset starting at 0. - Len int // Lenght in bytes. + Len int // Length of the error in bytes. } -func (pe ParseError) Error() string { - msg := pe.Message - if msg == "" { // Error from errorf() - msg = pe.err.Error() +func (p Position) withCol(tomlFile string) Position { + var ( + pos int + lines = strings.Split(tomlFile, "\n") + ) + for i := range lines { + ll := len(lines[i]) + 1 // +1 for the removed newline + if pos+ll >= p.Start { + p.Col = p.Start - pos + 1 + if p.Col < 1 { // Should never happen, but just in case. + p.Col = 1 + } + break + } + pos += ll } + return p +} +func (pe ParseError) Error() string { if pe.LastKey == "" { - return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg) + return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, pe.Message) } return fmt.Sprintf("toml: line %d (last key %q): %s", - pe.Position.Line, pe.LastKey, msg) + pe.Position.Line, pe.LastKey, pe.Message) } // ErrorWithPosition returns the error with detailed location context. @@ -92,26 +107,19 @@ func (pe ParseError) ErrorWithPosition() string { return pe.Error() } + // TODO: don't show control characters as literals? This may not show up + // well everywhere. + var ( lines = strings.Split(pe.input, "\n") - col = pe.column(lines) b = new(strings.Builder) ) - - msg := pe.Message - if msg == "" { - msg = pe.err.Error() - } - - // TODO: don't show control characters as literals? This may not show up - // well everywhere. 
- if pe.Position.Len == 1 { fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n", - msg, pe.Position.Line, col+1) + pe.Message, pe.Position.Line, pe.Position.Col) } else { fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n", - msg, pe.Position.Line, col, col+pe.Position.Len) + pe.Message, pe.Position.Line, pe.Position.Col, pe.Position.Col+pe.Position.Len-1) } if pe.Position.Line > 2 { fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3])) @@ -129,7 +137,7 @@ func (pe ParseError) ErrorWithPosition() string { diff := len(expanded) - len(lines[pe.Position.Line-1]) fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded) - fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col+diff), strings.Repeat("^", pe.Position.Len)) + fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", pe.Position.Col-1+diff), strings.Repeat("^", pe.Position.Len)) return b.String() } @@ -151,23 +159,6 @@ func (pe ParseError) ErrorWithUsage() string { return m } -func (pe ParseError) column(lines []string) int { - var pos, col int - for i := range lines { - ll := len(lines[i]) + 1 // +1 for the removed newline - if pos+ll >= pe.Position.Start { - col = pe.Position.Start - pos - if col < 0 { // Should never happen, but just in case. - col = 0 - } - break - } - pos += ll - } - - return col -} - func expandTab(s string) string { var ( b strings.Builder diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go index a1016d98a8..1c3b477029 100644 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -275,7 +275,9 @@ func (lx *lexer) errorPos(start, length int, err error) stateFn { func (lx *lexer) errorf(format string, values ...any) stateFn { if lx.atEOF { pos := lx.getPos() - pos.Line-- + if lx.pos >= 1 && lx.input[lx.pos-1] == '\n' { + pos.Line-- + } pos.Len = 1 pos.Start = lx.pos - 1 lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)} @@ -492,6 +494,9 @@ func lexKeyEnd(lx *lexer) stateFn { lx.emit(itemKeyEnd) return lexSkip(lx, lexValue) default: + if r == '\n' { + return lx.errorPrevLine(fmt.Errorf("expected '.' or '=', but got %q instead", r)) + } return lx.errorf("expected '.' 
or '=', but got %q instead", r) } } @@ -560,6 +565,9 @@ func lexValue(lx *lexer) stateFn { if r == eof { return lx.errorf("unexpected EOF; expected value") } + if r == '\n' { + return lx.errorPrevLine(fmt.Errorf("expected value but found %q instead", r)) + } return lx.errorf("expected value but found %q instead", r) } @@ -1111,7 +1119,7 @@ func lexBaseNumberOrDate(lx *lexer) stateFn { case 'x': r = lx.peek() if !isHex(r) { - lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r) + lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r) } return lexHexInteger } @@ -1259,23 +1267,6 @@ func isBinary(r rune) bool { return r == '0' || r == '1' } func isOctal(r rune) bool { return r >= '0' && r <= '7' } func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') } func isBareKeyChar(r rune, tomlNext bool) bool { - if tomlNext { - return (r >= 'A' && r <= 'Z') || - (r >= 'a' && r <= 'z') || - (r >= '0' && r <= '9') || - r == '_' || r == '-' || - r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) || - (r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) || - (r >= 0x037f && r <= 0x1fff) || - (r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) || - (r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) || - (r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) || - (r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) || - (r >= 0x10000 && r <= 0xeffff) - } - - return (r >= 'A' && r <= 'Z') || - (r >= 'a' && r <= 'z') || - (r >= '0' && r <= '9') || - r == '_' || r == '-' + return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || r == '_' || r == '-' } diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go index e614537300..0d337026c1 100644 --- a/vendor/github.com/BurntSushi/toml/meta.go +++ b/vendor/github.com/BurntSushi/toml/meta.go @@ -135,9 +135,6 @@ func (k Key) maybeQuoted(i int) string { // Like append(), but only increase the cap by 1. func (k Key) add(piece string) Key { - if cap(k) > len(k) { - return append(k, piece) - } newKey := make(Key, len(k)+1) copy(newKey, k) newKey[len(k)] = piece diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go index 11ac3108be..e3ea8a9a2d 100644 --- a/vendor/github.com/BurntSushi/toml/parse.go +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -50,7 +50,6 @@ func parse(data string) (p *parser, err error) { // it anyway. 
if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16 data = data[2:] - //lint:ignore S1017 https://github.com/dominikh/go-tools/issues/1447 } else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8 data = data[3:] } @@ -65,7 +64,7 @@ func parse(data string) (p *parser, err error) { if i := strings.IndexRune(data[:ex], 0); i > -1 { return nil, ParseError{ Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8", - Position: Position{Line: 1, Start: i, Len: 1}, + Position: Position{Line: 1, Col: 1, Start: i, Len: 1}, Line: 1, input: data, } @@ -92,8 +91,9 @@ func parse(data string) (p *parser, err error) { func (p *parser) panicErr(it item, err error) { panic(ParseError{ + Message: err.Error(), err: err, - Position: it.pos, + Position: it.pos.withCol(p.lx.input), Line: it.pos.Len, LastKey: p.current(), }) @@ -102,7 +102,7 @@ func (p *parser) panicErr(it item, err error) { func (p *parser) panicItemf(it item, format string, v ...any) { panic(ParseError{ Message: fmt.Sprintf(format, v...), - Position: it.pos, + Position: it.pos.withCol(p.lx.input), Line: it.pos.Len, LastKey: p.current(), }) @@ -111,7 +111,7 @@ func (p *parser) panicItemf(it item, format string, v ...any) { func (p *parser) panicf(format string, v ...any) { panic(ParseError{ Message: fmt.Sprintf(format, v...), - Position: p.pos, + Position: p.pos.withCol(p.lx.input), Line: p.pos.Line, LastKey: p.current(), }) @@ -123,10 +123,11 @@ func (p *parser) next() item { if it.typ == itemError { if it.err != nil { panic(ParseError{ - Position: it.pos, + Message: it.err.Error(), + err: it.err, + Position: it.pos.withCol(p.lx.input), Line: it.pos.Line, LastKey: p.current(), - err: it.err, }) } @@ -527,7 +528,7 @@ func numUnderscoresOK(s string) bool { } } - // isHexis a superset of all the permissable characters surrounding an + // isHex is a superset of all the permissible characters surrounding an // underscore. accept = isHex(r) } diff --git a/vendor/github.com/VividCortex/ewma/.gitignore b/vendor/github.com/VividCortex/ewma/.gitignore new file mode 100644 index 0000000000..c66769f6ca --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/.gitignore @@ -0,0 +1,3 @@ +.DS_Store +.*.sw? +/coverage.txt \ No newline at end of file diff --git a/vendor/github.com/VividCortex/ewma/.whitesource b/vendor/github.com/VividCortex/ewma/.whitesource new file mode 100644 index 0000000000..d7eebc0cf2 --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/.whitesource @@ -0,0 +1,3 @@ +{ + "settingsInheritedFrom": "VividCortex/whitesource-config@master" +} \ No newline at end of file diff --git a/vendor/github.com/VividCortex/ewma/LICENSE b/vendor/github.com/VividCortex/ewma/LICENSE new file mode 100644 index 0000000000..a78d643ed8 --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/LICENSE @@ -0,0 +1,21 @@ +The MIT License + +Copyright (c) 2013 VividCortex + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/VividCortex/ewma/README.md b/vendor/github.com/VividCortex/ewma/README.md new file mode 100644 index 0000000000..87b4a3c7ef --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/README.md @@ -0,0 +1,145 @@ +# EWMA + +[![GoDoc](https://godoc.org/github.com/VividCortex/ewma?status.svg)](https://godoc.org/github.com/VividCortex/ewma) +![build](https://github.com/VividCortex/ewma/workflows/build/badge.svg) +[![codecov](https://codecov.io/gh/VividCortex/ewma/branch/master/graph/badge.svg)](https://codecov.io/gh/VividCortex/ewma) + +This repo provides Exponentially Weighted Moving Average algorithms, or EWMAs for short, [based on our +Quantifying Abnormal Behavior talk](https://vividcortex.com/blog/2013/07/23/a-fast-go-library-for-exponential-moving-averages/). + +### Exponentially Weighted Moving Average + +An exponentially weighted moving average is a way to continuously compute a type of +average for a series of numbers, as the numbers arrive. After a value in the series is +added to the average, its weight in the average decreases exponentially over time. This +biases the average towards more recent data. EWMAs are useful for several reasons, chiefly +their inexpensive computational and memory cost, as well as the fact that they represent +the recent central tendency of the series of values. + +The EWMA algorithm requires a decay factor, alpha. The larger the alpha, the more the average +is biased towards recent history. The alpha must be between 0 and 1, and is typically +a fairly small number, such as 0.04. We will discuss the choice of alpha later. + +The algorithm works thus, in pseudocode: + +1. Multiply the next number in the series by alpha. +2. Multiply the current value of the average by 1 minus alpha. +3. Add the result of steps 1 and 2, and store it as the new current value of the average. +4. Repeat for each number in the series. + +There are special-case behaviors for how to initialize the current value, and these vary +between implementations. One approach is to start with the first value in the series; +another is to average the first 10 or so values in the series using an arithmetic average, +and then begin the incremental updating of the average. Each method has pros and cons. + +It may help to look at it pictorially. Suppose the series has five numbers, and we choose +alpha to be 0.50 for simplicity. Here's the series, with numbers in the neighborhood of 300. + +![Data Series](https://user-images.githubusercontent.com/279875/28242350-463289a2-6977-11e7-88ca-fd778ccef1f0.png) + +Now let's take the moving average of those numbers. First we set the average to the value +of the first number. + +![EWMA Step 1](https://user-images.githubusercontent.com/279875/28242353-464c96bc-6977-11e7-9981-dc4e0789c7ba.png) + +Next we multiply the next number by alpha, multiply the current value by 1-alpha, and add +them to generate a new value. + +![EWMA Step 2](https://user-images.githubusercontent.com/279875/28242351-464abefa-6977-11e7-95d0-43900f29bef2.png) + +This continues until we are done. 
+ +![EWMA Step N](https://user-images.githubusercontent.com/279875/28242352-464c58f0-6977-11e7-8cd0-e01e4efaac7f.png) + +Notice how each of the values in the series decays by half each time a new value +is added, and the top of the bars in the lower portion of the image represents the +size of the moving average. It is a smoothed, or low-pass, average of the original +series. + +For further reading, see [Exponentially weighted moving average](http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average) on wikipedia. + +### Choosing Alpha + +Consider a fixed-size sliding-window moving average (not an exponentially weighted moving average) +that averages over the previous N samples. What is the average age of each sample? It is N/2. + +Now suppose that you wish to construct a EWMA whose samples have the same average age. The formula +to compute the alpha required for this is: alpha = 2/(N+1). Proof is in the book +"Production and Operations Analysis" by Steven Nahmias. + +So, for example, if you have a time-series with samples once per second, and you want to get the +moving average over the previous minute, you should use an alpha of .032786885. This, by the way, +is the constant alpha used for this repository's SimpleEWMA. + +### Implementations + +This repository contains two implementations of the EWMA algorithm, with different properties. + +The implementations all conform to the MovingAverage interface, and the constructor returns +that type. + +Current implementations assume an implicit time interval of 1.0 between every sample added. +That is, the passage of time is treated as though it's the same as the arrival of samples. +If you need time-based decay when samples are not arriving precisely at set intervals, then +this package will not support your needs at present. + +#### SimpleEWMA + +A SimpleEWMA is designed for low CPU and memory consumption. It **will** have different behavior than the VariableEWMA +for multiple reasons. It has no warm-up period and it uses a constant +decay. These properties let it use less memory. It will also behave +differently when it's equal to zero, which is assumed to mean +uninitialized, so if a value is likely to actually become zero over time, +then any non-zero value will cause a sharp jump instead of a small change. + +#### VariableEWMA + +Unlike SimpleEWMA, this supports a custom age which must be stored, and thus uses more memory. +It also has a "warmup" time when you start adding values to it. It will report a value of 0.0 +until you have added the required number of samples to it. It uses some memory to store the +number of samples added to it. As a result it uses a little over twice the memory of SimpleEWMA. + +## Usage + +### API Documentation + +View the GoDoc generated documentation [here](http://godoc.org/github.com/VividCortex/ewma). + +```go +package main + +import "github.com/VividCortex/ewma" + +func main() { + samples := [100]float64{ + 4599, 5711, 4746, 4621, 5037, 4218, 4925, 4281, 5207, 5203, 5594, 5149, + } + + e := ewma.NewMovingAverage() //=> Returns a SimpleEWMA if called without params + a := ewma.NewMovingAverage(5) //=> returns a VariableEWMA with a decay of 2 / (5 + 1) + + for _, f := range samples { + e.Add(f) + a.Add(f) + } + + e.Value() //=> 13.577404704631077 + a.Value() //=> 1.5806140565521463e-12 +} +``` + +## Contributing + +We only accept pull requests for minor fixes or improvements. This includes: + +* Small bug fixes +* Typos +* Documentation or comments + +Please open issues to discuss new features. 
Pull requests for new features will be rejected, +so we recommend forking the repository and making changes in your fork for your use case. + +## License + +This repository is Copyright (c) 2013 VividCortex, Inc. All rights reserved. +It is licensed under the MIT license. Please see the LICENSE file for applicable license terms. diff --git a/vendor/github.com/VividCortex/ewma/codecov.yml b/vendor/github.com/VividCortex/ewma/codecov.yml new file mode 100644 index 0000000000..0d36d903f9 --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/codecov.yml @@ -0,0 +1,6 @@ +coverage: + status: + project: + default: + threshold: 15% + patch: off diff --git a/vendor/github.com/VividCortex/ewma/ewma.go b/vendor/github.com/VividCortex/ewma/ewma.go new file mode 100644 index 0000000000..44d5d53e3c --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/ewma.go @@ -0,0 +1,126 @@ +// Package ewma implements exponentially weighted moving averages. +package ewma + +// Copyright (c) 2013 VividCortex, Inc. All rights reserved. +// Please see the LICENSE file for applicable license terms. + +const ( + // By default, we average over a one-minute period, which means the average + // age of the metrics in the period is 30 seconds. + AVG_METRIC_AGE float64 = 30.0 + + // The formula for computing the decay factor from the average age comes + // from "Production and Operations Analysis" by Steven Nahmias. + DECAY float64 = 2 / (float64(AVG_METRIC_AGE) + 1) + + // For best results, the moving average should not be initialized to the + // samples it sees immediately. The book "Production and Operations + // Analysis" by Steven Nahmias suggests initializing the moving average to + // the mean of the first 10 samples. Until the VariableEwma has seen this + // many samples, it is not "ready" to be queried for the value of the + // moving average. This adds some memory cost. + WARMUP_SAMPLES uint8 = 10 +) + +// MovingAverage is the interface that computes a moving average over a time- +// series stream of numbers. The average may be over a window or exponentially +// decaying. +type MovingAverage interface { + Add(float64) + Value() float64 + Set(float64) +} + +// NewMovingAverage constructs a MovingAverage that computes an average with the +// desired characteristics in the moving window or exponential decay. If no +// age is given, it constructs a default exponentially weighted implementation +// that consumes minimal memory. The age is related to the decay factor alpha +// by the formula given for the DECAY constant. It signifies the average age +// of the samples as time goes to infinity. +func NewMovingAverage(age ...float64) MovingAverage { + if len(age) == 0 || age[0] == AVG_METRIC_AGE { + return new(SimpleEWMA) + } + return &VariableEWMA{ + decay: 2 / (age[0] + 1), + } +} + +// A SimpleEWMA represents the exponentially weighted moving average of a +// series of numbers. It WILL have different behavior than the VariableEWMA +// for multiple reasons. It has no warm-up period and it uses a constant +// decay. These properties let it use less memory. It will also behave +// differently when it's equal to zero, which is assumed to mean +// uninitialized, so if a value is likely to actually become zero over time, +// then any non-zero value will cause a sharp jump instead of a small change. +// However, note that this takes a long time, and the value may just +// decays to a stable value that's close to zero, but which won't be mistaken +// for uninitialized. See http://play.golang.org/p/litxBDr_RC for example. 
+type SimpleEWMA struct { + // The current value of the average. After adding with Add(), this is + // updated to reflect the average of all values seen thus far. + value float64 +} + +// Add adds a value to the series and updates the moving average. +func (e *SimpleEWMA) Add(value float64) { + if e.value == 0 { // this is a proxy for "uninitialized" + e.value = value + } else { + e.value = (value * DECAY) + (e.value * (1 - DECAY)) + } +} + +// Value returns the current value of the moving average. +func (e *SimpleEWMA) Value() float64 { + return e.value +} + +// Set sets the EWMA's value. +func (e *SimpleEWMA) Set(value float64) { + e.value = value +} + +// VariableEWMA represents the exponentially weighted moving average of a series of +// numbers. Unlike SimpleEWMA, it supports a custom age, and thus uses more memory. +type VariableEWMA struct { + // The multiplier factor by which the previous samples decay. + decay float64 + // The current value of the average. + value float64 + // The number of samples added to this instance. + count uint8 +} + +// Add adds a value to the series and updates the moving average. +func (e *VariableEWMA) Add(value float64) { + switch { + case e.count < WARMUP_SAMPLES: + e.count++ + e.value += value + case e.count == WARMUP_SAMPLES: + e.count++ + e.value = e.value / float64(WARMUP_SAMPLES) + e.value = (value * e.decay) + (e.value * (1 - e.decay)) + default: + e.value = (value * e.decay) + (e.value * (1 - e.decay)) + } +} + +// Value returns the current value of the average, or 0.0 if the series hasn't +// warmed up yet. +func (e *VariableEWMA) Value() float64 { + if e.count <= WARMUP_SAMPLES { + return 0.0 + } + + return e.value +} + +// Set sets the EWMA's value. +func (e *VariableEWMA) Set(value float64) { + e.value = value + if e.count <= WARMUP_SAMPLES { + e.count = WARMUP_SAMPLES + 1 + } +} diff --git a/vendor/github.com/acarl005/stripansi/LICENSE b/vendor/github.com/acarl005/stripansi/LICENSE new file mode 100644 index 0000000000..00abe0dbf4 --- /dev/null +++ b/vendor/github.com/acarl005/stripansi/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Andrew Carlson + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/acarl005/stripansi/README.md b/vendor/github.com/acarl005/stripansi/README.md new file mode 100644 index 0000000000..8bdb1f5059 --- /dev/null +++ b/vendor/github.com/acarl005/stripansi/README.md @@ -0,0 +1,30 @@ +Strip ANSI +========== + +This Go package removes ANSI escape codes from strings. 
+ +Ideally, we would prevent these from appearing in any text we want to process. +However, sometimes this can't be helped, and we need to be able to deal with that noise. +This will use a regexp to remove those unwanted escape codes. + + +## Install + +```sh +$ go get -u github.com/acarl005/stripansi +``` + +## Usage + +```go +import ( + "fmt" + "github.com/acarl005/stripansi" +) + +func main() { + msg := "\x1b[38;5;140m foo\x1b[0m bar" + cleanMsg := stripansi.Strip(msg) + fmt.Println(cleanMsg) // " foo bar" +} +``` diff --git a/vendor/github.com/acarl005/stripansi/stripansi.go b/vendor/github.com/acarl005/stripansi/stripansi.go new file mode 100644 index 0000000000..235732a782 --- /dev/null +++ b/vendor/github.com/acarl005/stripansi/stripansi.go @@ -0,0 +1,13 @@ +package stripansi + +import ( + "regexp" +) + +const ansi = "[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))" + +var re = regexp.MustCompile(ansi) + +func Strip(str string) string { + return re.ReplaceAllString(str, "") +} diff --git a/vendor/github.com/containers/common/pkg/auth/auth.go b/vendor/github.com/containers/common/pkg/auth/auth.go index a3d333a99a..4bcd490db3 100644 --- a/vendor/github.com/containers/common/pkg/auth/auth.go +++ b/vendor/github.com/containers/common/pkg/auth/auth.go @@ -173,10 +173,10 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO if opts.StdinPassword { var stdinPasswordStrBuilder strings.Builder if opts.Password != "" { - return errors.New("Can't specify both --password-stdin and --password") + return errors.New("can't specify both --password-stdin and --password") } if opts.Username == "" { - return errors.New("Must provide --username with --password-stdin") + return errors.New("must provide --username with --password-stdin") } scanner := bufio.NewScanner(opts.Stdin) for scanner.Scan() { diff --git a/vendor/github.com/containers/image/v5/copy/blob.go b/vendor/github.com/containers/image/v5/copy/blob.go new file mode 100644 index 0000000000..8d5580d7cb --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/blob.go @@ -0,0 +1,187 @@ +package copy + +import ( + "context" + "errors" + "fmt" + "io" + + "github.com/containers/image/v5/internal/private" + compressiontypes "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/image/v5/types" + "github.com/sirupsen/logrus" +) + +// copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcReader to dest, +// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil, +// perhaps (de/re/)compressing it if canModifyBlob, +// and returns a complete blobInfo of the copied blob. +func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Reader, srcInfo types.BlobInfo, + getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer, + isConfig bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) { + // The copying happens through a pipeline of connected io.Readers; + // that pipeline is built by updating stream. + // === Input: srcReader + stream := sourceStream{ + reader: srcReader, + info: srcInfo, + } + + // === Process input through digestingReader to validate against the expected digest. + // Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader, + // use a separate validation failure indicator. 
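The digest validation set up above follows a simple pattern: hash the bytes as they stream past, then compare against the expected digest at EOF. A self-contained sketch of that idea using github.com/opencontainers/go-digest (a simplified stand-in, not the vendored digestingReader defined later in this diff):

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/opencontainers/go-digest"
)

// verify drains r while hashing it, then compares against expected.
func verify(r io.Reader, expected digest.Digest) error {
	digester := expected.Algorithm().Digester()
	// TeeReader feeds every byte we read into the digester's hash.
	if _, err := io.Copy(io.Discard, io.TeeReader(r, digester.Hash())); err != nil {
		return err
	}
	if actual := digester.Digest(); actual != expected {
		return fmt.Errorf("digest mismatch: expected %s, got %s", expected, actual)
	}
	return nil
}

func main() {
	payload := "layer bytes"
	fmt.Println(verify(strings.NewReader(payload), digest.FromString(payload))) // <nil>
}
```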
+ // Note that for this check we don't use the stronger "validationSucceeded" indicator, because + // dest.PutBlob may detect that the layer already exists, in which case we don't + // read stream to the end, and validation does not happen. + digestingReader, err := newDigestingReader(stream.reader, srcInfo.Digest) + if err != nil { + return types.BlobInfo{}, fmt.Errorf("preparing to verify blob %s: %w", srcInfo.Digest, err) + } + stream.reader = digestingReader + + // === Update progress bars + stream.reader = bar.ProxyReader(stream.reader) + + // === Decrypt the stream, if required. + decryptionStep, err := ic.blobPipelineDecryptionStep(&stream, srcInfo) + if err != nil { + return types.BlobInfo{}, err + } + + // === Detect compression of the input stream. + // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression. + detectedCompression, err := blobPipelineDetectCompressionStep(&stream, srcInfo) + if err != nil { + return types.BlobInfo{}, err + } + + // === Send a copy of the original, uncompressed, stream, to a separate path if necessary. + var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so. + if getOriginalLayerCopyWriter != nil { + stream.reader = io.TeeReader(stream.reader, getOriginalLayerCopyWriter(detectedCompression.decompressor)) + originalLayerReader = stream.reader + } + + // WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists + // short-circuit conditions + canModifyBlob := !isConfig && ic.cannotModifyManifestReason == "" + // === Deal with layer compression/decompression if necessary + compressionStep, err := ic.blobPipelineCompressionStep(&stream, canModifyBlob, srcInfo, detectedCompression) + if err != nil { + return types.BlobInfo{}, err + } + defer compressionStep.close() + + // === Encrypt the stream for valid mediatypes if ociEncryptConfig provided + if decryptionStep.decrypting && toEncrypt { + // If nothing else, we can only set uploadedInfo.CryptoOperation to a single value. + // Before relaxing this, see the original pull request’s review if there are other reasons to reject this. + return types.BlobInfo{}, errors.New("Unable to support both decryption and encryption in the same copy") + } + encryptionStep, err := ic.blobPipelineEncryptionStep(&stream, toEncrypt, srcInfo, decryptionStep) + if err != nil { + return types.BlobInfo{}, err + } + + // === Report progress using the ic.c.options.Progress channel, if required. + if ic.c.options.Progress != nil && ic.c.options.ProgressInterval > 0 { + progressReader := newProgressReader( + stream.reader, + ic.c.options.Progress, + ic.c.options.ProgressInterval, + srcInfo, + ) + defer progressReader.reportDone() + stream.reader = progressReader + } + + // === Finally, send the layer stream to dest. 
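Each `===` step above swaps a wrapped reader into `stream.reader`, so every transformation happens lazily as the final consumer pulls data through the chain. Before the PutBlob call that follows, here is a toy, standard-library-only version of that chaining pattern (all names illustrative):

```go
package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"strings"
)

func main() {
	var stream io.Reader = strings.NewReader("layer bytes")

	// Step 1: observe bytes in flight (cf. the digesting and progress readers).
	var seen strings.Builder
	stream = io.TeeReader(stream, &seen)

	// Step 2: compress on the fly through a pipe (cf. compressedStream later in this file).
	pr, pw := io.Pipe()
	go func() {
		gz := gzip.NewWriter(pw)
		_, err := io.Copy(gz, stream)
		if err == nil {
			err = gz.Close()
		}
		pw.CloseWithError(err) // CloseWithError(nil) behaves like Close()
	}()

	// The final consumer drives the whole chain, just as PutBlob does.
	compressed, err := io.ReadAll(pr)
	fmt.Println(len(compressed), seen.String(), err)
}
```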
+ options := private.PutBlobOptions{ + Cache: ic.c.blobInfoCache, + IsConfig: isConfig, + EmptyLayer: emptyLayer, + } + if !isConfig { + options.LayerIndex = &layerIndex + } + destBlob, err := ic.c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{stream.reader}, stream.info, options) + if err != nil { + return types.BlobInfo{}, fmt.Errorf("writing blob: %w", err) + } + uploadedInfo := updatedBlobInfoFromUpload(stream.info, destBlob) + + compressionStep.updateCompressionEdits(&uploadedInfo.CompressionOperation, &uploadedInfo.CompressionAlgorithm, &uploadedInfo.Annotations) + decryptionStep.updateCryptoOperation(&uploadedInfo.CryptoOperation) + if err := encryptionStep.updateCryptoOperationAndAnnotations(&uploadedInfo.CryptoOperation, &uploadedInfo.Annotations); err != nil { + return types.BlobInfo{}, err + } + + // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume + // all of the input (to compute DiffIDs), even if dest.PutBlob does not need it. + // So, read everything from originalLayerReader, which will cause the rest to be + // sent there if we are not already at EOF. + if getOriginalLayerCopyWriter != nil { + logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter") + _, err := io.Copy(io.Discard, originalLayerReader) + if err != nil { + return types.BlobInfo{}, fmt.Errorf("reading input blob %s: %w", srcInfo.Digest, err) + } + } + + if digestingReader.validationFailed { // Coverage: This should never happen. + return types.BlobInfo{}, fmt.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest) + } + if stream.info.Digest != "" && uploadedInfo.Digest != stream.info.Digest { + return types.BlobInfo{}, fmt.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, stream.info.Digest, uploadedInfo.Digest) + } + if digestingReader.validationSucceeded { + if err := compressionStep.recordValidatedDigestData(ic.c, uploadedInfo, srcInfo, encryptionStep, decryptionStep); err != nil { + return types.BlobInfo{}, err + } + } + + return uploadedInfo, nil +} + +// sourceStream encapsulates an input consumed by copyBlobFromStream, as it is being built. +// This allows handlers of individual aspects to build the copy pipeline without _too much_ +// specific cooperation by the caller. +// +// We are currently very far from a generalized plug-and-play API for building/consuming the pipeline +// without specific knowledge of various aspects in copyBlobFromStream; that may come one day. +type sourceStream struct { + reader io.Reader + info types.BlobInfo // corresponding to the data available in reader. +} + +// errorAnnotationReader wraps the io.Reader passed to PutBlob for annotating errors that happen during read. +// These errors are reported as PutBlob errors, so we would otherwise misleadingly attribute them to the copy destination. +type errorAnnotationReader struct { + reader io.Reader +} + +// Read annotates any error that happens during read +func (r errorAnnotationReader) Read(b []byte) (n int, err error) { + n, err = r.reader.Read(b) + if err != nil && err != io.EOF { + return n, fmt.Errorf("happened during read: %w", err) + } + return n, err +} + +// updatedBlobInfoFromUpload returns inputInfo updated with uploadedBlob which was created based on inputInfo.
+func updatedBlobInfoFromUpload(inputInfo types.BlobInfo, uploadedBlob private.UploadedBlob) types.BlobInfo { + // The transport is only tasked with dealing with the raw blob, and possibly computing Digest/Size. + // Handling of compression, encryption, and the related MIME types and the like are all the responsibility + // of the generic code in this package. + return types.BlobInfo{ + Digest: uploadedBlob.Digest, + Size: uploadedBlob.Size, + URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior. + Annotations: inputInfo.Annotations, + MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression/Crypto. + CompressionOperation: inputInfo.CompressionOperation, // Expected to be unset, and only updated by copyBlobFromStream. + CompressionAlgorithm: inputInfo.CompressionAlgorithm, // Expected to be unset, and only updated by copyBlobFromStream. + CryptoOperation: inputInfo.CryptoOperation, // Expected to be unset, and only updated by copyBlobFromStream. + } +} diff --git a/vendor/github.com/containers/image/v5/copy/compression.go b/vendor/github.com/containers/image/v5/copy/compression.go new file mode 100644 index 0000000000..fb5e1b174e --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/compression.go @@ -0,0 +1,434 @@ +package copy + +import ( + "errors" + "fmt" + "io" + "maps" + + internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" + compressiontypes "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/image/v5/types" + chunkedToc "github.com/containers/storage/pkg/chunked/toc" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" +) + +var ( + // defaultCompressionFormat is used if the destination transport requests + // compression, and the user does not explicitly instruct us to use an algorithm. + defaultCompressionFormat = &compression.Gzip + + // compressionBufferSize is the buffer size used to compress a blob + compressionBufferSize = 1048576 + + // expectedBaseCompressionFormats is used to check if a blob with a specified media type is compressed + // using the algorithm that the media type says it should be compressed with + expectedBaseCompressionFormats = map[string]*compressiontypes.Algorithm{ + imgspecv1.MediaTypeImageLayerGzip: &compression.Gzip, + imgspecv1.MediaTypeImageLayerZstd: &compression.Zstd, + manifest.DockerV2Schema2LayerMediaType: &compression.Gzip, + } +) + +// bpDetectCompressionStepData contains data that the copy pipeline needs about the “detect compression” step. +type bpDetectCompressionStepData struct { + isCompressed bool + format compressiontypes.Algorithm // Valid if isCompressed + decompressor compressiontypes.DecompressorFunc // Valid if isCompressed + srcCompressorBaseVariantName string // Compressor name to possibly record in the blob info cache for the source blob. +} + +// blobPipelineDetectCompressionStep updates *stream to detect its current compression format. +// srcInfo is only used for error messages. +// Returns data for other steps. +func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobInfo) (bpDetectCompressionStepData, error) { + // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression. 
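A standard-library sketch of the “peek ahead” idea described above: inspect the magic bytes without consuming them, then hand back a replacement reader that still yields the whole stream. (The vendored code uses compression.DetectCompressionFormat instead; this is only an illustration.)

```go
package main

import (
	"bufio"
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

// detectGzip peeks at the first two bytes and returns a reader that still
// yields the complete, unconsumed stream — like `stream.reader = reader`.
func detectGzip(r io.Reader) (bool, io.Reader, error) {
	br := bufio.NewReader(r)
	magic, err := br.Peek(2)
	if err != nil && err != io.EOF {
		return false, br, err
	}
	isGzip := len(magic) == 2 && magic[0] == 0x1f && magic[1] == 0x8b
	return isGzip, br, nil
}

func main() {
	var buf bytes.Buffer
	gz := gzip.NewWriter(&buf)
	gz.Write([]byte("hello"))
	gz.Close()

	isGzip, rest, _ := detectGzip(&buf)
	data, _ := io.ReadAll(rest) // still the complete compressed stream
	fmt.Println(isGzip, len(data))
}
```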
+ format, decompressor, reader, err := compression.DetectCompressionFormat(stream.reader) // We could skip this in some cases, but let's keep the code path uniform + if err != nil { + return bpDetectCompressionStepData{}, fmt.Errorf("reading blob %s: %w", srcInfo.Digest, err) + } + stream.reader = reader + + if decompressor != nil && format.Name() == compressiontypes.ZstdAlgorithmName { + tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations) + if err != nil { + return bpDetectCompressionStepData{}, err + } + if tocDigest != nil { + format = compression.ZstdChunked + } + + } + res := bpDetectCompressionStepData{ + isCompressed: decompressor != nil, + format: format, + decompressor: decompressor, + } + if res.isCompressed { + res.srcCompressorBaseVariantName = format.BaseVariantName() + } else { + res.srcCompressorBaseVariantName = internalblobinfocache.Uncompressed + } + + if expectedBaseFormat, known := expectedBaseCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.BaseVariantName() != expectedBaseFormat.Name() { + logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedBaseFormat.Name(), format.Name()) + } + return res, nil +} + +// bpCompressionStepData contains data that the copy pipeline needs about the compression step. +type bpCompressionStepData struct { + operation bpcOperation // What we are actually doing + uploadedOperation types.LayerCompression // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do) + uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits. + uploadedAnnotations map[string]string // Compression-related annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed. + srcCompressorBaseVariantName string // Compressor base variant name to record in the blob info cache for the source blob. + uploadedCompressorBaseVariantName string // Compressor base variant name to record in the blob info cache for the uploaded blob. + uploadedCompressorSpecificVariantName string // Compressor specific variant name to record in the blob info cache for the uploaded blob. + closers []io.Closer // Objects to close after the upload is done, if any. +} + +type bpcOperation int + +const ( + bpcOpInvalid bpcOperation = iota + bpcOpPreserveOpaque // We are preserving something where compression is not applicable + bpcOpPreserveCompressed // We are preserving a compressed, and decompressible, layer + bpcOpPreserveUncompressed // We are preserving an uncompressed, and compressible, layer + bpcOpCompressUncompressed // We are compressing uncompressed data + bpcOpRecompressCompressed // We are recompressing compressed data + bpcOpDecompressCompressed // We are decompressing compressed data +) + +// blobPipelineCompressionStep updates *stream to compress and/or decompress it. +// srcInfo is primarily used for error messages. +// Returns data for other steps; the caller should eventually call updateCompressionEdits and perhaps recordValidatedBlobData, +// and must eventually call close. 
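The function that follows dispatches to a list of candidate step constructors and uses the first one that applies; each returns nil when it does not. Schematically, with toy types rather than the vendored ones:

```go
package main

import "fmt"

// step returns a non-nil result if it applies to the input, nil otherwise.
type step func(input string) (*string, error)

func firstApplicable(input string, steps []step) (string, error) {
	for _, fn := range steps {
		res, err := fn(input)
		if err != nil {
			return "", err
		}
		if res != nil {
			return *res, nil // first step that applies wins
		}
	}
	return input, nil // fall back to preserving the original
}

func main() {
	compress := func(in string) (*string, error) {
		if in != "uncompressed" {
			return nil, nil // does not apply
		}
		out := "compressed"
		return &out, nil
	}
	fmt.Println(firstApplicable("uncompressed", []step{compress}))
}
```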
+func (ic *imageCopier) blobPipelineCompressionStep(stream *sourceStream, canModifyBlob bool, srcInfo types.BlobInfo, + detected bpDetectCompressionStepData) (*bpCompressionStepData, error) { + // WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists + // short-circuit conditions + layerCompressionChangeSupported := ic.src.CanChangeLayerCompression(stream.info.MediaType) + if !layerCompressionChangeSupported { + logrus.Debugf("Compression change for blob %s (%q) not supported", srcInfo.Digest, stream.info.MediaType) + } + if canModifyBlob && layerCompressionChangeSupported { + for _, fn := range []func(*sourceStream, bpDetectCompressionStepData) (*bpCompressionStepData, error){ + ic.bpcPreserveEncrypted, + ic.bpcCompressUncompressed, + ic.bpcRecompressCompressed, + ic.bpcDecompressCompressed, + } { + res, err := fn(stream, detected) + if err != nil { + return nil, err + } + if res != nil { + return res, nil + } + } + } + return ic.bpcPreserveOriginal(stream, detected, layerCompressionChangeSupported), nil +} + +// bpcPreserveEncrypted checks if the input is encrypted, and returns a *bpCompressionStepData if so. +func (ic *imageCopier) bpcPreserveEncrypted(stream *sourceStream, _ bpDetectCompressionStepData) (*bpCompressionStepData, error) { + if isOciEncrypted(stream.info.MediaType) { + // We can’t do anything with an encrypted blob unless decrypted. + logrus.Debugf("Using original blob without modification for encrypted blob") + return &bpCompressionStepData{ + operation: bpcOpPreserveOpaque, + uploadedOperation: types.PreserveOriginal, + uploadedAlgorithm: nil, + srcCompressorBaseVariantName: internalblobinfocache.UnknownCompression, + uploadedCompressorBaseVariantName: internalblobinfocache.UnknownCompression, + uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression, + }, nil + } + return nil, nil +} + +// bpcCompressUncompressed checks if we should be compressing an uncompressed input, and returns a *bpCompressionStepData if so. +func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) { + if ic.c.dest.DesiredLayerCompression() == types.Compress && !detected.isCompressed { + logrus.Debugf("Compressing blob on the fly") + var uploadedAlgorithm *compressiontypes.Algorithm + if ic.compressionFormat != nil { + uploadedAlgorithm = ic.compressionFormat + } else { + uploadedAlgorithm = defaultCompressionFormat + } + + reader, annotations := ic.compressedStream(stream.reader, *uploadedAlgorithm) + // Note: reader must be closed on all return paths. + stream.reader = reader + stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? 
+ Digest: "", + Size: -1, + } + specificVariantName := uploadedAlgorithm.Name() + if specificVariantName == uploadedAlgorithm.BaseVariantName() { + specificVariantName = internalblobinfocache.UnknownCompression + } + return &bpCompressionStepData{ + operation: bpcOpCompressUncompressed, + uploadedOperation: types.Compress, + uploadedAlgorithm: uploadedAlgorithm, + uploadedAnnotations: annotations, + srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + uploadedCompressorBaseVariantName: uploadedAlgorithm.BaseVariantName(), + uploadedCompressorSpecificVariantName: specificVariantName, + closers: []io.Closer{reader}, + }, nil + } + return nil, nil +} + +// bpcRecompressCompressed checks if we should be recompressing a compressed input to another format, and returns a *bpCompressionStepData if so. +func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) { + if ic.c.dest.DesiredLayerCompression() == types.Compress && detected.isCompressed && + ic.compressionFormat != nil && + (ic.compressionFormat.Name() != detected.format.Name() && ic.compressionFormat.Name() != detected.format.BaseVariantName()) { + // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally + // re-compressed using the desired format. + logrus.Debugf("Blob will be converted") + + decompressed, err := detected.decompressor(stream.reader) + if err != nil { + return nil, err + } + succeeded := false + defer func() { + if !succeeded { + decompressed.Close() + } + }() + + recompressed, annotations := ic.compressedStream(decompressed, *ic.compressionFormat) + // Note: recompressed must be closed on all return paths. + stream.reader = recompressed + stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? Notably the current approach correctly removes zstd:chunked metadata annotations. + Digest: "", + Size: -1, + } + specificVariantName := ic.compressionFormat.Name() + if specificVariantName == ic.compressionFormat.BaseVariantName() { + specificVariantName = internalblobinfocache.UnknownCompression + } + succeeded = true + return &bpCompressionStepData{ + operation: bpcOpRecompressCompressed, + uploadedOperation: types.PreserveOriginal, + uploadedAlgorithm: ic.compressionFormat, + uploadedAnnotations: annotations, + srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + uploadedCompressorBaseVariantName: ic.compressionFormat.BaseVariantName(), + uploadedCompressorSpecificVariantName: specificVariantName, + closers: []io.Closer{decompressed, recompressed}, + }, nil + } + return nil, nil +} + +// bpcDecompressCompressed checks if we should be decompressing a compressed input, and returns a *bpCompressionStepData if so. +func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) { + if ic.c.dest.DesiredLayerCompression() == types.Decompress && detected.isCompressed { + logrus.Debugf("Blob will be decompressed") + s, err := detected.decompressor(stream.reader) + if err != nil { + return nil, err + } + // Note: s must be closed on all return paths. + stream.reader = s + stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? Notably the current approach correctly removes zstd:chunked metadata annotations. 
+ Digest: "", + Size: -1, + } + return &bpCompressionStepData{ + operation: bpcOpDecompressCompressed, + uploadedOperation: types.Decompress, + uploadedAlgorithm: nil, + srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + uploadedCompressorBaseVariantName: internalblobinfocache.Uncompressed, + uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression, + closers: []io.Closer{s}, + }, nil + } + return nil, nil +} + +// bpcPreserveOriginal returns a *bpCompressionStepData for not changing the original blob. +// This does not change the sourceStream parameter; we include it for symmetry with other +// pipeline steps. +func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCompressionStepData, + layerCompressionChangeSupported bool) *bpCompressionStepData { + logrus.Debugf("Using original blob without modification") + // Remember if the original blob was compressed, and if so how, so that if + // LayerInfosForCopy() returned something that differs from what was in the + // source's manifest, and UpdatedImage() needs to call UpdateLayerInfos(), + // it will be able to correctly derive the MediaType for the copied blob. + // + // But don’t touch blobs in objects where we can’t change compression, + // so that src.UpdatedImage() doesn’t fail; assume that for such blobs + // LayerInfosForCopy() should not be making any changes in the first place. + var bpcOp bpcOperation + var uploadedOp types.LayerCompression + var algorithm *compressiontypes.Algorithm + switch { + case !layerCompressionChangeSupported: + bpcOp = bpcOpPreserveOpaque + uploadedOp = types.PreserveOriginal + algorithm = nil + case detected.isCompressed: + bpcOp = bpcOpPreserveCompressed + uploadedOp = types.PreserveOriginal + algorithm = &detected.format + default: + bpcOp = bpcOpPreserveUncompressed + uploadedOp = types.Decompress + algorithm = nil + } + return &bpCompressionStepData{ + operation: bpcOp, + uploadedOperation: uploadedOp, + uploadedAlgorithm: algorithm, + srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + // We only record the base variant of the format on upload; we didn’t do anything with + // the TOC, we don’t know whether it matches the blob digest, so we don’t want to trigger + // reuse of any kind between the blob digest and the TOC digest. + uploadedCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression, + } +} + +// updateCompressionEdits sets *operation, *algorithm and updates *annotations, if necessary. +func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCompression, algorithm **compressiontypes.Algorithm, annotations *map[string]string) { + *operation = d.uploadedOperation + // If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest. + *algorithm = d.uploadedAlgorithm + if *annotations == nil { + *annotations = map[string]string{} + } + maps.Copy(*annotations, d.uploadedAnnotations) +} + +// recordValidatedDigestData updates b.blobInfoCache with data about the created uploadedInfo (as returned by PutBlob) +// and the original srcInfo (which the caller guarantees has been validated). +// This must ONLY be called if all data has been validated by OUR code, and is not coming from third parties. 
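The implementation below records digest relationships in the blob info cache so that later copies can substitute equivalent blobs. As a toy stand-in for the easiest piece to picture, the compressed-to-uncompressed digest pairing (the method name mirrors the real cache; everything else is hypothetical):

```go
package main

import "fmt"

// toyCache mimics only RecordDigestUncompressedPair from the real
// BlobInfoCache2 interface; the vendored cache is persistent and richer.
type toyCache struct {
	uncompressedOf map[string]string // any digest -> uncompressed digest
}

func (c *toyCache) RecordDigestUncompressedPair(anyDigest, uncompressed string) {
	c.uncompressedOf[anyDigest] = uncompressed
}

func main() {
	c := &toyCache{uncompressedOf: map[string]string{}}
	// After bpcOpCompressUncompressed: the uploaded (compressed) digest pairs
	// with the source (uncompressed) digest.
	c.RecordDigestUncompressedPair("sha256:compressed", "sha256:uncompressed")
	fmt.Println(c.uncompressedOf["sha256:compressed"])
}
```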
+func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInfo types.BlobInfo, srcInfo types.BlobInfo, + encryptionStep *bpEncryptionStepData, decryptionStep *bpDecryptionStepData) error { + // Don’t record any associations that involve encrypted data. This is a bit crude, + // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes) + // might be safe, but it’s not trivially obvious, so let’s be conservative for now. + // This crude approach also means we don’t need to record whether a blob is encrypted + // in the blob info cache (which would probably be necessary for any more complex logic), + // and the simplicity is attractive. + if !encryptionStep.encrypting && !decryptionStep.decrypting { + // If d.operation != bpcOpPreserve*, we now have two reliable digest values: + // srcinfo.Digest describes the pre-d.operation input, verified by digestingReader + // uploadedInfo.Digest describes the post-d.operation output, computed by PutBlob + // (because we set stream.info.Digest == "", this must have been computed afresh). + switch d.operation { + case bpcOpPreserveOpaque: + // No useful information + case bpcOpCompressUncompressed: + c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest) + if d.uploadedAnnotations != nil { + tocDigest, err := chunkedToc.GetTOCDigest(d.uploadedAnnotations) + if err != nil { + return fmt.Errorf("parsing just-created compression annotations: %w", err) + } + if tocDigest != nil { + c.blobInfoCache.RecordTOCUncompressedPair(*tocDigest, srcInfo.Digest) + } + } + case bpcOpDecompressCompressed: + c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest) + case bpcOpRecompressCompressed, bpcOpPreserveCompressed: + // We know one or two compressed digests. BlobInfoCache associates compression variants via the uncompressed digest, + // and we don’t know that one. + // That also means that repeated copies with the same recompression don’t identify reuse opportunities (unless + // RecordDigestUncompressedPair was called for both compressed variants for some other reason). + case bpcOpPreserveUncompressed: + c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, srcInfo.Digest) + case bpcOpInvalid: + fallthrough + default: + return fmt.Errorf("Internal error: Unexpected d.operation value %#v", d.operation) + } + } + if d.srcCompressorBaseVariantName == "" || d.uploadedCompressorBaseVariantName == "" || d.uploadedCompressorSpecificVariantName == "" { + return fmt.Errorf("internal error: missing compressor names (src base: %q, uploaded base: %q, uploaded specific: %q)", + d.srcCompressorBaseVariantName, d.uploadedCompressorBaseVariantName, d.uploadedCompressorSpecificVariantName) + } + if d.uploadedCompressorBaseVariantName != internalblobinfocache.UnknownCompression { + c.blobInfoCache.RecordDigestCompressorData(uploadedInfo.Digest, internalblobinfocache.DigestCompressorData{ + BaseVariantCompressor: d.uploadedCompressorBaseVariantName, + SpecificVariantCompressor: d.uploadedCompressorSpecificVariantName, + SpecificVariantAnnotations: d.uploadedAnnotations, + }) + } + if srcInfo.Digest != "" && srcInfo.Digest != uploadedInfo.Digest && + d.srcCompressorBaseVariantName != internalblobinfocache.UnknownCompression { + // If the source is already using some TOC-dependent variant, we either copied the + // blob as is, or perhaps decompressed it; either way we don’t trust the TOC digest, + // so record neither the variant name, nor the TOC digest. 
+ c.blobInfoCache.RecordDigestCompressorData(srcInfo.Digest, internalblobinfocache.DigestCompressorData{ + BaseVariantCompressor: d.srcCompressorBaseVariantName, + SpecificVariantCompressor: internalblobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, + }) + } + return nil +} + +// close closes objects that carry state throughout the compression/decompression operation. +func (d *bpCompressionStepData) close() { + for _, c := range d.closers { + c.Close() + } +} + +// doCompression reads all input from src and writes its compressed equivalent to dest. +func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm, compressionLevel *int) error { + compressor, err := compression.CompressStreamWithMetadata(dest, metadata, compressionFormat, compressionLevel) + if err != nil { + return err + } + + buf := make([]byte, compressionBufferSize) + + _, err = io.CopyBuffer(compressor, src, buf) // Sets err to nil, i.e. causes dest.Close() + if err != nil { + compressor.Close() + return err + } + + return compressor.Close() +} + +// compressGoroutine reads all input from src and writes its compressed equivalent to dest. +func (ic *imageCopier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) { + err := errors.New("Internal error: unexpected panic in compressGoroutine") + defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily. + _ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil + }() + + err = doCompression(dest, src, metadata, compressionFormat, ic.compressionLevel) +} + +// compressedStream returns a stream the input reader compressed using format, and a metadata map. +// The caller must close the returned reader. +// AFTER the stream is consumed, metadata will be updated with annotations to use on the data. +func (ic *imageCopier) compressedStream(reader io.Reader, algorithm compressiontypes.Algorithm) (io.ReadCloser, map[string]string) { + pipeReader, pipeWriter := io.Pipe() + annotations := map[string]string{} + // If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise, + // e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed, + // we don’t care. 
+ go ic.compressGoroutine(pipeWriter, reader, annotations, algorithm) // Closes pipeWriter + return pipeReader, annotations +} diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go new file mode 100644 index 0000000000..44675f37cf --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/copy.go @@ -0,0 +1,417 @@ +package copy + +import ( + "context" + "errors" + "fmt" + "io" + "os" + "slices" + "time" + + "github.com/containers/image/v5/docker/reference" + internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/internal/image" + "github.com/containers/image/v5/internal/imagedestination" + "github.com/containers/image/v5/internal/imagesource" + internalManifest "github.com/containers/image/v5/internal/manifest" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache" + compression "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/image/v5/signature" + "github.com/containers/image/v5/signature/signer" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + encconfig "github.com/containers/ocicrypt/config" + digest "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" + "golang.org/x/sync/semaphore" + "golang.org/x/term" +) + +var ( + // ErrDecryptParamsMissing is returned if required decryption parameters are missing + ErrDecryptParamsMissing = errors.New("Necessary DecryptParameters not present") + + // maxParallelDownloads is used to limit the maximum number of parallel + // downloads. Let's follow Firefox by limiting it to 6. + maxParallelDownloads = uint(6) +) + +const ( + // CopySystemImage is the default value which, when set in + // Options.ImageListSelection, indicates that the caller expects only one + // image to be copied, so if the source reference refers to a list of + // images, one that matches the current system will be selected. + CopySystemImage ImageListSelection = iota + // CopyAllImages is a value which, when set in Options.ImageListSelection, + // indicates that the caller expects to copy multiple images, and if + // the source reference refers to a list, that the list and every image + // to which it refers will be copied. If the source reference refers + // to a list and the target reference cannot accept lists, an error + // should be returned. + CopyAllImages + // CopySpecificImages is a value which, when set in + // Options.ImageListSelection, indicates that the caller expects the + // source reference to be either a single image or a list of images, + // and if the source reference is a list, wants only specific instances + // from it copied (or none of them, if the list of instances to copy is + // empty), along with the list itself. If the target reference can + // only accept one image (i.e., it cannot accept lists), an error + // should be returned. + CopySpecificImages +) + +// ImageListSelection is one of CopySystemImage, CopyAllImages, or +// CopySpecificImages, to control whether, when the source reference is a list, +// copy.Image() copies only an image which matches the current runtime +// environment, or all images which match the supplied reference, or only +// specific images from the source reference. +type ImageListSelection int + +// Options allows supplying non-default configuration modifying the behavior of CopyImage.
+type Options struct { + RemoveSignatures bool // Remove any pre-existing signatures. Signers and SignBy… will still add a new signature. + // Signers to use to add signatures during the copy. + // Callers are still responsible for closing these Signer objects; they can be reused for multiple copy.Image operations in a row. + Signers []*signer.Signer + SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(), + SignPassphrase string // Passphrase to use when signing with the key ID from `SignBy`. + SignBySigstorePrivateKeyFile string // If non-empty, asks for a signature to be added during the copy, using a sigstore private key file at the provided path. + SignSigstorePrivateKeyPassphrase []byte // Passphrase to use when signing with `SignBySigstorePrivateKeyFile`. + SignIdentity reference.Named // Identity to use when signing, defaults to the docker reference of the destination + + ReportWriter io.Writer + SourceCtx *types.SystemContext + DestinationCtx *types.SystemContext + ProgressInterval time.Duration // time to wait between reports to signal the progress channel + Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset. + + // Preserve digests, and fail if we cannot. + PreserveDigests bool + // manifest MIME type of image set by user. "" is the default and means the manifest MIME type is autodetected + ForceManifestMIMEType string + ImageListSelection ImageListSelection // set to either CopySystemImage (the default), CopyAllImages, or CopySpecificImages to control which instances we copy when the source reference is a list; ignored if the source reference is not a list + Instances []digest.Digest // if ImageListSelection is CopySpecificImages, copy only these instances and the list itself + // Give priority to pulling gzip images if multiple images are present when configured to OptionalBoolTrue, + // or prefer the best compression if this is configured as OptionalBoolFalse. Choose automatically (and the choice may change over time) + // if this is set to OptionalBoolUndefined (which is the default behavior, and recommended for most callers). + // This only affects CopySystemImage. + PreferGzipInstances types.OptionalBool + + // If OciEncryptConfig is non-nil, it indicates that an image should be encrypted. + // The encryption options are derived from the construction of the EncryptConfig object. + OciEncryptConfig *encconfig.EncryptConfig + // OciEncryptLayers represents the list of layers to encrypt. + // If nil, don't encrypt any layers. + // If non-nil and len==0, denotes encrypting all layers. + // integers in the slice represent 0-indexed layer indices, with support for negative + // indexing. i.e. 0 is the first layer, -1 is the last (top-most) layer. + OciEncryptLayers *[]int + // OciDecryptConfig, if non-nil, contains the config that can be used to decrypt an image + // if it is encrypted. If nil, it does not attempt to decrypt an image. + OciDecryptConfig *encconfig.DecryptConfig + + // A weighted semaphore to limit the amount of concurrently copied layers and configs. Applies to all copy operations using the semaphore. If set, MaxParallelDownloads is ignored. + ConcurrentBlobCopiesSemaphore *semaphore.Weighted + + // MaxParallelDownloads indicates the maximum layers to pull at the same time. Applies to a single copy operation. A reasonable default is used if this is left as 0.
Ignored if ConcurrentBlobCopiesSemaphore is set. + MaxParallelDownloads uint + + // When OptimizeDestinationImageAlreadyExists is set, optimize the copy assuming that the destination image already + // exists (and is equivalent), making the eventual (no-op) copy more performant for this case. Enabling the option + // is slightly pessimistic if the destination image doesn't exist, or is not equivalent. + OptimizeDestinationImageAlreadyExists bool + + // Download layer contents with "nondistributable" media types ("foreign" layers) and translate the layer media type + // to not indicate "nondistributable". + DownloadForeignLayers bool + + // Contains slice of OptionCompressionVariant, where copy will ensure that for each platform + // in the manifest list, a variant with the requested compression will exist. + // Invalid when copying a non-multi-architecture image. That will probably + // change in the future. + EnsureCompressionVariantsExist []OptionCompressionVariant + // ForceCompressionFormat ensures that the compression algorithm set in + // DestinationCtx.CompressionFormat is used exclusively, and blobs of other + // compression algorithms are not reused. + ForceCompressionFormat bool + + // ReportResolvedReference, if set, asks the destination transport to store + // a “resolved” (more detailed) reference to the created image + // into the value this option points to. + // What “resolved” means is transport-specific. + // Most transports don’t support this, and cause the value to be set to nil. + // + // For the containers-storage: transport, the reference contains an image ID, + // so that storage.ResolveReference returns exactly the created image. + // WARNING: It is unspecified whether the reference also contains a reference.Named element. + ReportResolvedReference *types.ImageReference + + // DestinationTimestamp, if set, will force timestamps of content created in the destination to this value. + // Most transports don't support this. + // + // In oci-archive: destinations, this will set the create/mod/access timestamps in each tar entry + // (but not a timestamp of the created archive file). + DestinationTimestamp *time.Time +} + +// OptionCompressionVariant allows the end-user to supply information about +// the selected compression algorithm and compression level. +// Refer to EnsureCompressionVariantsExist to know more about its usage. +type OptionCompressionVariant struct { + Algorithm compression.Algorithm + Level *int // Only used when we are creating a new image instance using the specified algorithm, not when the image already contains such an instance +} + +// copier allows us to keep track of diffID values for blobs, and other +// data shared across one or more images in a possible manifest list. +// The owner must call close() when done. +type copier struct { + policyContext *signature.PolicyContext + dest private.ImageDestination + rawSource private.ImageSource + options *Options // never nil + + reportWriter io.Writer + progressOutput io.Writer + + unparsedToplevel *image.UnparsedImage // for rawSource + blobInfoCache internalblobinfocache.BlobInfoCache2 + concurrentBlobCopiesSemaphore *semaphore.Weighted // Limits the amount of concurrently copied blobs + signers []*signer.Signer // Signers to use to create new signatures for the image + signersToClose []*signer.Signer // Signers that should be closed when this copier is destroyed.
+} + +// Internal function to validate `requireCompressionFormatMatch` for copySingleImageOptions +func shouldRequireCompressionFormatMatch(options *Options) (bool, error) { + if options.ForceCompressionFormat && (options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil) { + return false, fmt.Errorf("cannot use ForceCompressionFormat with undefined default compression format") + } + return options.ForceCompressionFormat, nil +} + +// Image copies image from srcRef to destRef, using policyContext to validate +// source image admissibility. It returns the manifest which was written to +// the new copy of the image. +func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (copiedManifest []byte, retErr error) { + if options == nil { + options = &Options{} + } + + if err := validateImageListSelection(options.ImageListSelection); err != nil { + return nil, err + } + + reportWriter := io.Discard + + if options.ReportWriter != nil { + reportWriter = options.ReportWriter + } + + // safeClose amends retErr with an error from c.Close(), if any. + safeClose := func(name string, c io.Closer) { + err := c.Close() + if err == nil { + return + } + // Do not use %w for err as we don't want it to be unwrapped by callers. + if retErr != nil { + retErr = fmt.Errorf(" (%s: %s): %w", name, err.Error(), retErr) + } else { + retErr = fmt.Errorf(" (%s: %s)", name, err.Error()) + } + } + + publicDest, err := destRef.NewImageDestination(ctx, options.DestinationCtx) + if err != nil { + return nil, fmt.Errorf("initializing destination %s: %w", transports.ImageName(destRef), err) + } + dest := imagedestination.FromPublic(publicDest) + defer safeClose("dest", dest) + + publicRawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx) + if err != nil { + return nil, fmt.Errorf("initializing source %s: %w", transports.ImageName(srcRef), err) + } + rawSource := imagesource.FromPublic(publicRawSource) + defer safeClose("src", rawSource) + + // If reportWriter is not a TTY (e.g., when piping to a file), do not + // print the progress bars to avoid long and hard to parse output. + // Instead use printCopyInfo() to print single line "Copying ..." messages. + progressOutput := reportWriter + if !isTTY(reportWriter) { + progressOutput = io.Discard + } + + c := &copier{ + policyContext: policyContext, + dest: dest, + rawSource: rawSource, + options: options, + + reportWriter: reportWriter, + progressOutput: progressOutput, + + unparsedToplevel: image.UnparsedInstance(rawSource, nil), + // FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx. + // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more). + // Conceptually the cache settings should be in copy.Options instead. + blobInfoCache: internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)), + } + defer c.close() + c.blobInfoCache.Open() + defer c.blobInfoCache.Close() + + // Set the concurrentBlobCopiesSemaphore if we can copy layers in parallel. 
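The cap on parallel blob copies below is a weighted semaphore. A minimal sketch of the same mechanism with golang.org/x/sync/semaphore (the weight of 6 mirrors maxParallelDownloads; the rest is illustrative):

```go
package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/semaphore"
)

func main() {
	ctx := context.Background()
	sem := semaphore.NewWeighted(6) // cf. maxParallelDownloads

	var wg sync.WaitGroup
	for i := 0; i < 20; i++ {
		if err := sem.Acquire(ctx, 1); err != nil {
			break // context cancelled
		}
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			defer sem.Release(1)
			fmt.Println("copying layer", n) // at most 6 run at once
		}(i)
	}
	wg.Wait()
}
```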
+ if dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob() { + c.concurrentBlobCopiesSemaphore = c.options.ConcurrentBlobCopiesSemaphore + if c.concurrentBlobCopiesSemaphore == nil { + max := c.options.MaxParallelDownloads + if max == 0 { + max = maxParallelDownloads + } + c.concurrentBlobCopiesSemaphore = semaphore.NewWeighted(int64(max)) + } + } else { + c.concurrentBlobCopiesSemaphore = semaphore.NewWeighted(int64(1)) + if c.options.ConcurrentBlobCopiesSemaphore != nil { + if err := c.options.ConcurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil { + return nil, fmt.Errorf("acquiring semaphore for concurrent blob copies: %w", err) + } + defer c.options.ConcurrentBlobCopiesSemaphore.Release(1) + } + } + + if err := c.setupSigners(); err != nil { + return nil, err + } + + multiImage, err := isMultiImage(ctx, c.unparsedToplevel) + if err != nil { + return nil, fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(srcRef), err) + } + + if !multiImage { + if len(options.EnsureCompressionVariantsExist) > 0 { + return nil, fmt.Errorf("EnsureCompressionVariantsExist is not implemented when not creating a multi-architecture image") + } + requireCompressionFormatMatch, err := shouldRequireCompressionFormatMatch(options) + if err != nil { + return nil, err + } + // The simple case: just copy a single image. + single, err := c.copySingleImage(ctx, c.unparsedToplevel, nil, copySingleImageOptions{requireCompressionFormatMatch: requireCompressionFormatMatch}) + if err != nil { + return nil, err + } + copiedManifest = single.manifest + } else if c.options.ImageListSelection == CopySystemImage { + if len(options.EnsureCompressionVariantsExist) > 0 { + return nil, fmt.Errorf("EnsureCompressionVariantsExist is not implemented when not creating a multi-architecture image") + } + requireCompressionFormatMatch, err := shouldRequireCompressionFormatMatch(options) + if err != nil { + return nil, err + } + // This is a manifest list, and we weren't asked to copy multiple images. Choose a single image that + // matches the current system to copy, and copy it. + mfest, manifestType, err := c.unparsedToplevel.Manifest(ctx) + if err != nil { + return nil, fmt.Errorf("reading manifest for %s: %w", transports.ImageName(srcRef), err) + } + manifestList, err := internalManifest.ListFromBlob(mfest, manifestType) + if err != nil { + return nil, fmt.Errorf("parsing primary manifest as list for %s: %w", transports.ImageName(srcRef), err) + } + instanceDigest, err := manifestList.ChooseInstanceByCompression(c.options.SourceCtx, c.options.PreferGzipInstances) // try to pick one that matches c.options.SourceCtx + if err != nil { + return nil, fmt.Errorf("choosing an image from manifest list %s: %w", transports.ImageName(srcRef), err) + } + logrus.Debugf("Source is a manifest list; copying (only) instance %s for current system", instanceDigest) + unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest) + single, err := c.copySingleImage(ctx, unparsedInstance, nil, copySingleImageOptions{requireCompressionFormatMatch: requireCompressionFormatMatch}) + if err != nil { + return nil, fmt.Errorf("copying system image from manifest list: %w", err) + } + copiedManifest = single.manifest + } else { /* c.options.ImageListSelection == CopyAllImages or c.options.ImageListSelection == CopySpecificImages, */ + // If we were asked to copy multiple images and can't, that's an error. 
+ if !supportsMultipleImages(c.dest) { + return nil, fmt.Errorf("copying multiple images: destination transport %q does not support copying multiple images as a group", destRef.Transport().Name()) + } + // Copy some or all of the images. + switch c.options.ImageListSelection { + case CopyAllImages: + logrus.Debugf("Source is a manifest list; copying all instances") + case CopySpecificImages: + logrus.Debugf("Source is a manifest list; copying some instances") + } + if copiedManifest, err = c.copyMultipleImages(ctx); err != nil { + return nil, err + } + } + + if options.ReportResolvedReference != nil { + *options.ReportResolvedReference = nil // The default outcome, if not specifically supported by the transport. + } + if err := c.dest.CommitWithOptions(ctx, private.CommitOptions{ + UnparsedToplevel: c.unparsedToplevel, + ReportResolvedReference: options.ReportResolvedReference, + Timestamp: options.DestinationTimestamp, + }); err != nil { + return nil, fmt.Errorf("committing the finished image: %w", err) + } + + return copiedManifest, nil +} + +// Printf writes a formatted string to c.reportWriter. +// Note that the method name Printf is not entirely arbitrary: (go tool vet) +// has a built-in list of functions/methods (whatever object they are for) +// which have their format strings checked; for other names we would have +// to pass a parameter to every (go tool vet) invocation. +func (c *copier) Printf(format string, a ...any) { + fmt.Fprintf(c.reportWriter, format, a...) +} + +// close tears down state owned by copier. +func (c *copier) close() { + for i, s := range c.signersToClose { + if err := s.Close(); err != nil { + logrus.Warnf("Error closing per-copy signer %d: %v", i+1, err) + } + } +} + +// validateImageListSelection returns an error if the passed-in value is not one that we recognize as a valid ImageListSelection value +func validateImageListSelection(selection ImageListSelection) error { + switch selection { + case CopySystemImage, CopyAllImages, CopySpecificImages: + return nil + default: + return fmt.Errorf("Invalid value for options.ImageListSelection: %d", selection) + } +} + +// Checks if the destination supports accepting multiple images by checking if it can support +// manifest types that are lists of other manifests. +func supportsMultipleImages(dest types.ImageDestination) bool { + mtypes := dest.SupportedManifestMIMETypes() + if len(mtypes) == 0 { + // Anything goes! + return true + } + return slices.ContainsFunc(mtypes, manifest.MIMETypeIsMultiImage) +} + +// isTTY returns true if the io.Writer is a file and a tty. +func isTTY(w io.Writer) bool { + if f, ok := w.(*os.File); ok { + return term.IsTerminal(int(f.Fd())) + } + return false +} diff --git a/vendor/github.com/containers/image/v5/copy/digesting_reader.go b/vendor/github.com/containers/image/v5/copy/digesting_reader.go new file mode 100644 index 0000000000..4c6ba82ee5 --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/digesting_reader.go @@ -0,0 +1,62 @@ +package copy + +import ( + "fmt" + "hash" + "io" + + digest "github.com/opencontainers/go-digest" +) + +type digestingReader struct { + source io.Reader + digester digest.Digester + hash hash.Hash + expectedDigest digest.Digest + validationFailed bool + validationSucceeded bool +} + +// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error +// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest. 
+// (neither is set if EOF is never reached). +func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) { + var digester digest.Digester + if err := expectedDigest.Validate(); err != nil { + return nil, fmt.Errorf("invalid digest specification %q: %w", expectedDigest, err) + } + digestAlgorithm := expectedDigest.Algorithm() + if !digestAlgorithm.Available() { + return nil, fmt.Errorf("invalid digest specification %q: unsupported digest algorithm %q", expectedDigest, digestAlgorithm) + } + digester = digestAlgorithm.Digester() + + return &digestingReader{ + source: source, + digester: digester, + hash: digester.Hash(), + expectedDigest: expectedDigest, + validationFailed: false, + }, nil +} + +func (d *digestingReader) Read(p []byte) (int, error) { + n, err := d.source.Read(p) + if n > 0 { + if n2, err := d.hash.Write(p[:n]); n2 != n || err != nil { + // Coverage: This should not happen, the hash.Hash interface requires + // d.hash.Write to never return an error, and the io.Writer interface + // requires n2 == len(input) if no error is returned. + return 0, fmt.Errorf("updating digest during verification: %d vs. %d: %w", n2, n, err) + } + } + if err == io.EOF { + actualDigest := d.digester.Digest() + if actualDigest != d.expectedDigest { + d.validationFailed = true + return 0, fmt.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest) + } + d.validationSucceeded = true + } + return n, err +} diff --git a/vendor/github.com/containers/image/v5/copy/encryption.go b/vendor/github.com/containers/image/v5/copy/encryption.go new file mode 100644 index 0000000000..b5a88a914b --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/encryption.go @@ -0,0 +1,138 @@ +package copy + +import ( + "fmt" + "maps" + "slices" + "strings" + + "github.com/containers/image/v5/types" + "github.com/containers/ocicrypt" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// isOciEncrypted returns a bool indicating if a mediatype is encrypted +// This function will be moved to be part of OCI spec when adopted. +func isOciEncrypted(mediatype string) bool { + return strings.HasSuffix(mediatype, "+encrypted") +} + +// isEncrypted checks if an image is encrypted +func isEncrypted(i types.Image) bool { + layers := i.LayerInfos() + return slices.ContainsFunc(layers, func(l types.BlobInfo) bool { + return isOciEncrypted(l.MediaType) + }) +} + +// bpDecryptionStepData contains data that the copy pipeline needs about the decryption step. +type bpDecryptionStepData struct { + decrypting bool // We are actually decrypting the stream +} + +// blobPipelineDecryptionStep updates *stream to decrypt it, if necessary. +// srcInfo is only used for error messages. +// Returns data for other steps; the caller should eventually use updateCryptoOperation. +func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.BlobInfo) (*bpDecryptionStepData, error) { + if !isOciEncrypted(stream.info.MediaType) || ic.c.options.OciDecryptConfig == nil { + return &bpDecryptionStepData{ + decrypting: false, + }, nil + } + + if ic.cannotModifyManifestReason != "" { + return nil, fmt.Errorf("layer %s should be decrypted, but we can’t modify the manifest: %s", srcInfo.Digest, ic.cannotModifyManifestReason) + } + + desc := imgspecv1.Descriptor{ + Annotations: stream.info.Annotations, + } + // DecryptLayer supposedly returns a digest of the decrypted stream. + // In practice, that value is never set in the current implementation.
+ // And we shouldn’t use it anyway, because it is not trusted: encryption can be made to a public key, + // i.e. it doesn’t authenticate the origin of the metadata in any way. + reader, _, err := ocicrypt.DecryptLayer(ic.c.options.OciDecryptConfig, stream.reader, desc, false) + if err != nil { + return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err) + } + + stream.reader = reader + stream.info.Digest = "" + stream.info.Size = -1 + maps.DeleteFunc(stream.info.Annotations, func(k string, _ string) bool { + return strings.HasPrefix(k, "org.opencontainers.image.enc") + }) + return &bpDecryptionStepData{ + decrypting: true, + }, nil +} + +// updateCryptoOperation sets *operation, if necessary. +func (d *bpDecryptionStepData) updateCryptoOperation(operation *types.LayerCrypto) { + if d.decrypting { + *operation = types.Decrypt + } +} + +// bpEncryptionStepData contains data that the copy pipeline needs about the encryption step. +type bpEncryptionStepData struct { + encrypting bool // We are actually encrypting the stream + finalizer ocicrypt.EncryptLayerFinalizer +} + +// blobPipelineEncryptionStep updates *stream to encrypt it, if required by toEncrypt. +// srcInfo is primarily used for error messages. +// Returns data for other steps; the caller should eventually call updateCryptoOperationAndAnnotations. +func (ic *imageCopier) blobPipelineEncryptionStep(stream *sourceStream, toEncrypt bool, srcInfo types.BlobInfo, + decryptionStep *bpDecryptionStepData) (*bpEncryptionStepData, error) { + if !toEncrypt || isOciEncrypted(srcInfo.MediaType) || ic.c.options.OciEncryptConfig == nil { + return &bpEncryptionStepData{ + encrypting: false, + }, nil + } + + if ic.cannotModifyManifestReason != "" { + return nil, fmt.Errorf("layer %s should be encrypted, but we can’t modify the manifest: %s", srcInfo.Digest, ic.cannotModifyManifestReason) + } + + var annotations map[string]string + if !decryptionStep.decrypting { + annotations = srcInfo.Annotations + } + desc := imgspecv1.Descriptor{ + MediaType: srcInfo.MediaType, + Digest: srcInfo.Digest, + Size: srcInfo.Size, + Annotations: annotations, + } + reader, finalizer, err := ocicrypt.EncryptLayer(ic.c.options.OciEncryptConfig, stream.reader, desc) + if err != nil { + return nil, fmt.Errorf("encrypting blob %s: %w", srcInfo.Digest, err) + } + + stream.reader = reader + stream.info.Digest = "" + stream.info.Size = -1 + return &bpEncryptionStepData{ + encrypting: true, + finalizer: finalizer, + }, nil +} + +// updateCryptoOperationAndAnnotations sets *operation and updates *annotations, if necessary.
+func (d *bpEncryptionStepData) updateCryptoOperationAndAnnotations(operation *types.LayerCrypto, annotations *map[string]string) error {
+	if !d.encrypting {
+		return nil
+	}
+
+	encryptAnnotations, err := d.finalizer()
+	if err != nil {
+		return fmt.Errorf("Unable to finalize encryption: %w", err)
+	}
+	*operation = types.Encrypt
+	if *annotations == nil {
+		*annotations = map[string]string{}
+	}
+	maps.Copy(*annotations, encryptAnnotations)
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/copy/manifest.go b/vendor/github.com/containers/image/v5/copy/manifest.go
new file mode 100644
index 0000000000..97837f9f28
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/manifest.go
@@ -0,0 +1,253 @@
+package copy
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"slices"
+	"strings"
+
+	internalManifest "github.com/containers/image/v5/internal/manifest"
+	"github.com/containers/image/v5/internal/set"
+	"github.com/containers/image/v5/manifest"
+	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+	"github.com/containers/image/v5/types"
+	v1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/sirupsen/logrus"
+)
+
+// preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert.
+// Prefer v2s2 to v2s1 because v2s2 does not need to be changed when uploading to a different location.
+// Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used.
+var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType}
+
+// allManifestMIMETypes lists all possible manifest MIME types.
+var allManifestMIMETypes = []string{v1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType}
+
+// orderedSet is a list of strings (MIME types or platform descriptors in our case), with each string appearing at most once.
+type orderedSet struct {
+	list     []string
+	included *set.Set[string]
+}
+
+// newOrderedSet creates a correctly initialized orderedSet.
+// [Sometimes it would be really nice if Golang had constructors…]
+func newOrderedSet() *orderedSet {
+	return &orderedSet{
+		list:     []string{},
+		included: set.New[string](),
+	}
+}
+
+// append adds s to the end of os, only if it is not included already.
+func (os *orderedSet) append(s string) {
+	if !os.included.Contains(s) {
+		os.list = append(os.list, s)
+		os.included.Add(s)
+	}
+}
+
+// determineManifestConversionInputs contains the inputs for determineManifestConversion.
+type determineManifestConversionInputs struct {
+	srcMIMEType string // MIME type of the input manifest
+
+	destSupportedManifestMIMETypes []string // MIME types supported by the destination, per types.ImageDestination.SupportedManifestMIMETypes()
+
+	forceManifestMIMEType      string                      // User’s choice of forced manifest MIME type
+	requestedCompressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user _explicitly_ requested one.
+	requiresOCIEncryption      bool                        // Restrict to manifest formats that can support OCI encryption
+	cannotModifyManifestReason string                      // The reason the manifest cannot be modified, or an empty string if it can
+}
+
+// manifestConversionPlan contains the decisions made by determineManifestConversion.
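+// For illustration (hypothetical values, not derived from a real copy): converting an OCI
+// image for a destination that only accepts Docker schema2 could produce
+//
+//	manifestConversionPlan{
+//		preferredMIMEType:                manifest.DockerV2Schema2MediaType,
+//		preferredMIMETypeNeedsConversion: true,
+//		otherMIMETypeCandidates:          []string{manifest.DockerV2Schema1SignedMediaType},
+//	}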
+type manifestConversionPlan struct { + // The preferred manifest MIME type (whether we are converting to it or using it unmodified). + // We compute this only to show it in error messages; without having to add this context + // in an error message, we would be happy enough to know only that no conversion is needed. + preferredMIMEType string + preferredMIMETypeNeedsConversion bool // True if using preferredMIMEType requires a conversion step. + otherMIMETypeCandidates []string // Other possible alternatives, in order +} + +// determineManifestConversion returns a plan for what formats, and possibly conversions, to use based on in. +func determineManifestConversion(in determineManifestConversionInputs) (manifestConversionPlan, error) { + srcType := in.srcMIMEType + normalizedSrcType := manifest.NormalizedMIMEType(srcType) + if srcType != normalizedSrcType { + logrus.Debugf("Source manifest MIME type %q, treating it as %q", srcType, normalizedSrcType) + srcType = normalizedSrcType + } + + destSupportedManifestMIMETypes := in.destSupportedManifestMIMETypes + if in.forceManifestMIMEType != "" { + destSupportedManifestMIMETypes = []string{in.forceManifestMIMEType} + } + if len(destSupportedManifestMIMETypes) == 0 { + destSupportedManifestMIMETypes = allManifestMIMETypes + } + + restrictiveCompressionRequired := in.requestedCompressionFormat != nil && !internalManifest.CompressionAlgorithmIsUniversallySupported(*in.requestedCompressionFormat) + supportedByDest := set.New[string]() + for _, t := range destSupportedManifestMIMETypes { + if in.requiresOCIEncryption && !manifest.MIMETypeSupportsEncryption(t) { + continue + } + if restrictiveCompressionRequired && !internalManifest.MIMETypeSupportsCompressionAlgorithm(t, *in.requestedCompressionFormat) { + continue + } + supportedByDest.Add(t) + } + if supportedByDest.Empty() { + if len(destSupportedManifestMIMETypes) == 0 { // Coverage: This should never happen, empty values were replaced by allManifestMIMETypes + return manifestConversionPlan{}, errors.New("internal error: destSupportedManifestMIMETypes is empty") + } + // We know, and have verified, that destSupportedManifestMIMETypes is not empty, so some filtering of supported MIME types must have been involved. + + // destSupportedManifestMIMETypes has three possible origins: + if in.forceManifestMIMEType != "" { // 1. forceManifestType specified + switch { + case in.requiresOCIEncryption && restrictiveCompressionRequired: + return manifestConversionPlan{}, fmt.Errorf("compression using %s, and encryption, required together with format %s, which does not support both", + in.requestedCompressionFormat.Name(), in.forceManifestMIMEType) + case in.requiresOCIEncryption: + return manifestConversionPlan{}, fmt.Errorf("encryption required together with format %s, which does not support encryption", + in.forceManifestMIMEType) + case restrictiveCompressionRequired: + return manifestConversionPlan{}, fmt.Errorf("compression using %s required together with format %s, which does not support it", + in.requestedCompressionFormat.Name(), in.forceManifestMIMEType) + default: + return manifestConversionPlan{}, errors.New("internal error: forceManifestMIMEType was rejected for an unknown reason") + } + } + if len(in.destSupportedManifestMIMETypes) == 0 { // 2. destination accepts anything and we have chosen allManifestTypes + if !restrictiveCompressionRequired { + // Coverage: This should never happen. 
+				// If we have not rejected for compression reasons, we must have rejected due to encryption, but
+				// allManifestTypes includes OCI, which supports encryption.
+				return manifestConversionPlan{}, errors.New("internal error: in.destSupportedManifestMIMETypes is empty but supportedByDest is empty as well")
+			}
+			// This can legitimately happen when the user asks for completely unsupported formats like Bzip2 or Xz.
+			return manifestConversionPlan{}, fmt.Errorf("compression using %s required, but none of the known manifest formats support it", in.requestedCompressionFormat.Name())
+		}
+		// 3. destination accepts a restricted list of mime types
+		destMIMEList := strings.Join(destSupportedManifestMIMETypes, ", ")
+		switch {
+		case in.requiresOCIEncryption && restrictiveCompressionRequired:
+			return manifestConversionPlan{}, fmt.Errorf("compression using %s, and encryption, required but the destination only supports MIME types [%s], none of which support both",
+				in.requestedCompressionFormat.Name(), destMIMEList)
+		case in.requiresOCIEncryption:
+			return manifestConversionPlan{}, fmt.Errorf("encryption required but the destination only supports MIME types [%s], none of which support encryption",
+				destMIMEList)
+		case restrictiveCompressionRequired:
+			return manifestConversionPlan{}, fmt.Errorf("compression using %s required but the destination only supports MIME types [%s], none of which support it",
+				in.requestedCompressionFormat.Name(), destMIMEList)
+		default: // Coverage: This should never happen, we only filter for in.requiresOCIEncryption || restrictiveCompressionRequired
+			return manifestConversionPlan{}, errors.New("internal error: supportedByDest is empty but destSupportedManifestMIMETypes is not, and we are neither encrypting nor requiring a restrictive compression algorithm")
+		}
+	}
+
+	// destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types.
+	// So, build a list of types to try in order of decreasing preference.
+	// FIXME? This treats manifest.DockerV2Schema1SignedMediaType and manifest.DockerV2Schema1MediaType as distinct,
+	// although we are not really making any conversion, and it is very unlikely that a destination would support one but not the other.
+	// In practice, schema1 is probably the lowest common denominator, so we would expect to try the first one of the MIME types
+	// and never attempt the other one.
+	prioritizedTypes := newOrderedSet()
+
+	// First of all, prefer to keep the original manifest unmodified.
+	if supportedByDest.Contains(srcType) {
+		prioritizedTypes.append(srcType)
+	}
+	if in.cannotModifyManifestReason != "" {
+		// We could also drop this check and have the caller
+		// make the choice; it is already doing that to an extent, to improve error
+		// messages. But it is nice to hide the “if we can't modify, do no conversion”
+		// special case in here; the caller can then worry (or not) only about a good UI.
+		logrus.Debugf("We can't modify the manifest, hoping for the best...")
+		return manifestConversionPlan{ // Take our chances - FIXME? Or should we fail without trying?
+			preferredMIMEType:       srcType,
+			otherMIMETypeCandidates: []string{},
+		}, nil
+	}
+
+	// Then use our list of preferred types.
+	for _, t := range preferredManifestMIMETypes {
+		if supportedByDest.Contains(t) {
+			prioritizedTypes.append(t)
+		}
+	}
+
+	// Finally, try anything else the destination supports.
+	for _, t := range destSupportedManifestMIMETypes {
+		if supportedByDest.Contains(t) {
+			prioritizedTypes.append(t)
+		}
+	}
+
+	logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", "))
+	if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes and its subset supportedByDest are not empty (or we would have exited above), so this should never happen.
+		return manifestConversionPlan{}, errors.New("Internal error: no candidate MIME types")
+	}
+	res := manifestConversionPlan{
+		preferredMIMEType:       prioritizedTypes.list[0],
+		otherMIMETypeCandidates: prioritizedTypes.list[1:],
+	}
+	res.preferredMIMETypeNeedsConversion = res.preferredMIMEType != srcType
+	if !res.preferredMIMETypeNeedsConversion {
+		logrus.Debugf("... will first try using the original manifest unmodified")
+	}
+	return res, nil
+}
+
+// isMultiImage returns true if img is a list of images
+func isMultiImage(ctx context.Context, img types.UnparsedImage) (bool, error) {
+	_, mt, err := img.Manifest(ctx)
+	if err != nil {
+		return false, err
+	}
+	return manifest.MIMETypeIsMultiImage(mt), nil
+}
+
+// determineListConversion takes the current MIME type of a list of manifests,
+// the list of MIME types supported for a given destination, and a possible
+// forced value, and returns the MIME type to which we should convert the list
+// of manifests (regardless of whether we are converting to it or using it
+// unmodified) and a slice of other list types which might be supported by the
+// destination.
+func (c *copier) determineListConversion(currentListMIMEType string, destSupportedMIMETypes []string, forcedListMIMEType string) (string, []string, error) {
+	// If there's no list of supported types, then anything we support is expected to be supported.
+	if len(destSupportedMIMETypes) == 0 {
+		destSupportedMIMETypes = manifest.SupportedListMIMETypes
+	}
+	// If we're forcing it, replace the list of supported types with the forced value.
+	if forcedListMIMEType != "" {
+		destSupportedMIMETypes = []string{forcedListMIMEType}
+	}
+
+	prioritizedTypes := newOrderedSet()
+	// The first priority is the current type, if it's in the list, since that lets us avoid a
+	// conversion that isn't strictly necessary.
+	if slices.Contains(destSupportedMIMETypes, currentListMIMEType) {
+		prioritizedTypes.append(currentListMIMEType)
+	}
+	// Pick out the other list types that we support.
+	for _, t := range destSupportedMIMETypes {
+		if manifest.MIMETypeIsMultiImage(t) {
+			prioritizedTypes.append(t)
+		}
+	}
+
+	logrus.Debugf("Manifest list has MIME type %q, ordered candidate list [%s]", currentListMIMEType, strings.Join(destSupportedMIMETypes, ", "))
+	if len(prioritizedTypes.list) == 0 {
+		return "", nil, fmt.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes)
+	}
+	selectedType := prioritizedTypes.list[0]
+	otherSupportedTypes := prioritizedTypes.list[1:]
+	if selectedType != currentListMIMEType {
+		logrus.Debugf("... will convert to %s first, and then try %v", selectedType, otherSupportedTypes)
+	} else {
+		logrus.Debugf("... will use the original manifest list type, and then try %v", otherSupportedTypes)
+	}
+	// Done.
+	return selectedType, otherSupportedTypes, nil
+}
diff --git a/vendor/github.com/containers/image/v5/copy/multiple.go b/vendor/github.com/containers/image/v5/copy/multiple.go
new file mode 100644
index 0000000000..b0c107e8ca
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/multiple.go
@@ -0,0 +1,354 @@
+package copy
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"maps"
+	"slices"
+	"sort"
+	"strings"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/internal/image"
+	internalManifest "github.com/containers/image/v5/internal/manifest"
+	"github.com/containers/image/v5/internal/set"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/pkg/compression"
+	digest "github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/sirupsen/logrus"
+)
+
+type instanceCopyKind int
+
+const (
+	instanceCopyCopy instanceCopyKind = iota
+	instanceCopyClone
+)
+
+type instanceCopy struct {
+	op           instanceCopyKind
+	sourceDigest digest.Digest
+
+	// Fields which can be used by callers when operation
+	// is `instanceCopyCopy`
+	copyForceCompressionFormat bool
+
+	// Fields which can be used by callers when operation
+	// is `instanceCopyClone`
+	cloneArtifactType       string
+	cloneCompressionVariant OptionCompressionVariant
+	clonePlatform           *imgspecv1.Platform
+	cloneAnnotations        map[string]string
+}
+
+// internal type only to make imgspecv1.Platform comparable
+type platformComparable struct {
+	architecture string
+	os           string
+	osVersion    string
+	osFeatures   string
+	variant      string
+}
+
+// Converts imgspecv1.Platform to a comparable format.
+func platformV1ToPlatformComparable(platform *imgspecv1.Platform) platformComparable {
+	if platform == nil {
+		return platformComparable{}
+	}
+	osFeatures := slices.Clone(platform.OSFeatures)
+	sort.Strings(osFeatures)
+	return platformComparable{architecture: platform.Architecture,
+		os: platform.OS,
+		// This is strictly speaking ambiguous, fields of OSFeatures can contain a ','. Probably good enough for now.
+		osFeatures: strings.Join(osFeatures, ","),
+		osVersion:  platform.OSVersion,
+		variant:    platform.Variant,
+	}
+}
+
+// platformCompressionMap prepares a mapping of platformComparable -> CompressionAlgorithmNames for given digests
+func platformCompressionMap(list internalManifest.List, instanceDigests []digest.Digest) (map[platformComparable]*set.Set[string], error) {
+	res := make(map[platformComparable]*set.Set[string])
+	for _, instanceDigest := range instanceDigests {
+		instanceDetails, err := list.Instance(instanceDigest)
+		if err != nil {
+			return nil, fmt.Errorf("getting details for instance %s: %w", instanceDigest, err)
+		}
+		platform := platformV1ToPlatformComparable(instanceDetails.ReadOnly.Platform)
+		platformSet, ok := res[platform]
+		if !ok {
+			platformSet = set.New[string]()
+			res[platform] = platformSet
+		}
+		platformSet.AddSeq(slices.Values(instanceDetails.ReadOnly.CompressionAlgorithmNames))
+	}
+	return res, nil
+}
+
+func validateCompressionVariantExists(input []OptionCompressionVariant) error {
+	for _, option := range input {
+		_, err := compression.AlgorithmByName(option.Algorithm.Name())
+		if err != nil {
+			return fmt.Errorf("invalid algorithm %q in option.EnsureCompressionVariantsExist: %w", option.Algorithm.Name(), err)
+		}
+	}
+	return nil
+}
+
+// prepareInstanceCopies prepares a list of instances which need to be copied to the manifest list.
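+// For example (hypothetical, assuming a gzip-only two-platform list): with
+// options.EnsureCompressionVariantsExist = []OptionCompressionVariant{{Algorithm: compression.Zstd}},
+// the result would contain one instanceCopyCopy entry per instance, plus one
+// instanceCopyClone entry per platform, since no platform has a zstd variant yet.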
+func prepareInstanceCopies(list internalManifest.List, instanceDigests []digest.Digest, options *Options) ([]instanceCopy, error) {
+	res := []instanceCopy{}
+	if options.ImageListSelection == CopySpecificImages && len(options.EnsureCompressionVariantsExist) > 0 {
+		// The list can already contain a compressed instance for a compression selected in `EnsureCompressionVariantsExist`.
+		// It’s unclear what it means when `CopySpecificImages` includes an instance in options.Instances,
+		// EnsureCompressionVariantsExist asks for an instance with some compression,
+		// an instance with that compression already exists, but is not included in options.Instances.
+		// We might define the semantics and implement this in the future.
+		return res, fmt.Errorf("EnsureCompressionVariantsExist is not implemented for CopySpecificImages")
+	}
+	err := validateCompressionVariantExists(options.EnsureCompressionVariantsExist)
+	if err != nil {
+		return res, err
+	}
+	compressionsByPlatform, err := platformCompressionMap(list, instanceDigests)
+	if err != nil {
+		return nil, err
+	}
+	for i, instanceDigest := range instanceDigests {
+		if options.ImageListSelection == CopySpecificImages &&
+			!slices.Contains(options.Instances, instanceDigest) {
+			logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests))
+			continue
+		}
+		instanceDetails, err := list.Instance(instanceDigest)
+		if err != nil {
+			return res, fmt.Errorf("getting details for instance %s: %w", instanceDigest, err)
+		}
+		forceCompressionFormat, err := shouldRequireCompressionFormatMatch(options)
+		if err != nil {
+			return nil, err
+		}
+		res = append(res, instanceCopy{
+			op:                         instanceCopyCopy,
+			sourceDigest:               instanceDigest,
+			copyForceCompressionFormat: forceCompressionFormat,
+		})
+		platform := platformV1ToPlatformComparable(instanceDetails.ReadOnly.Platform)
+		compressionList := compressionsByPlatform[platform]
+		for _, compressionVariant := range options.EnsureCompressionVariantsExist {
+			if !compressionList.Contains(compressionVariant.Algorithm.Name()) {
+				res = append(res, instanceCopy{
+					op:                      instanceCopyClone,
+					sourceDigest:            instanceDigest,
+					cloneArtifactType:       instanceDetails.ReadOnly.ArtifactType,
+					cloneCompressionVariant: compressionVariant,
+					clonePlatform:           instanceDetails.ReadOnly.Platform,
+					cloneAnnotations:        maps.Clone(instanceDetails.ReadOnly.Annotations),
+				})
+				// add current compression to the list so that we don’t create duplicate clones
+				compressionList.Add(compressionVariant.Algorithm.Name())
+			}
+		}
+	}
+	return res, nil
+}
+
+// copyMultipleImages copies some or all of an image list's instances, using
+// c.policyContext to validate source image admissibility.
+func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte, retErr error) {
+	// Parse the list and get a copy of the original value after it's re-encoded.
+ manifestList, manifestType, err := c.unparsedToplevel.Manifest(ctx) + if err != nil { + return nil, fmt.Errorf("reading manifest list: %w", err) + } + originalList, err := internalManifest.ListFromBlob(manifestList, manifestType) + if err != nil { + return nil, fmt.Errorf("parsing manifest list %q: %w", string(manifestList), err) + } + updatedList := originalList.CloneInternal() + + sigs, err := c.sourceSignatures(ctx, c.unparsedToplevel, + "Getting image list signatures", + "Checking if image list destination supports signatures") + if err != nil { + return nil, err + } + + // If the destination is a digested reference, make a note of that, determine what digest value we're + // expecting, and check that the source manifest matches it. + destIsDigestedReference := false + if named := c.dest.Reference().DockerReference(); named != nil { + if digested, ok := named.(reference.Digested); ok { + destIsDigestedReference = true + matches, err := manifest.MatchesDigest(manifestList, digested.Digest()) + if err != nil { + return nil, fmt.Errorf("computing digest of source image's manifest: %w", err) + } + if !matches { + return nil, errors.New("Digest of source image's manifest would not match destination reference") + } + } + } + + // Determine if we're allowed to modify the manifest list. + // If we can, set to the empty string. If we can't, set to the reason why. + // Compare, and perhaps keep in sync with, the version in copySingleImage. + cannotModifyManifestListReason := "" + if len(sigs) > 0 { + cannotModifyManifestListReason = "Would invalidate signatures" + } + if destIsDigestedReference { + cannotModifyManifestListReason = "Destination specifies a digest" + } + if c.options.PreserveDigests { + cannotModifyManifestListReason = "Instructed to preserve digests" + } + + // Determine if we'll need to convert the manifest list to a different format. + forceListMIMEType := c.options.ForceManifestMIMEType + switch forceListMIMEType { + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType: + forceListMIMEType = manifest.DockerV2ListMediaType + case imgspecv1.MediaTypeImageManifest: + forceListMIMEType = imgspecv1.MediaTypeImageIndex + } + selectedListType, otherManifestMIMETypeCandidates, err := c.determineListConversion(manifestType, c.dest.SupportedManifestMIMETypes(), forceListMIMEType) + if err != nil { + return nil, fmt.Errorf("determining manifest list type to write to destination: %w", err) + } + if selectedListType != originalList.MIMEType() { + if cannotModifyManifestListReason != "" { + return nil, fmt.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", selectedListType, cannotModifyManifestListReason) + } + } + + // Copy each image, or just the ones we want to copy, in turn. + instanceDigests := updatedList.Instances() + instanceEdits := []internalManifest.ListEdit{} + instanceCopyList, err := prepareInstanceCopies(updatedList, instanceDigests, c.options) + if err != nil { + return nil, fmt.Errorf("preparing instances for copy: %w", err) + } + c.Printf("Copying %d images generated from %d images in list\n", len(instanceCopyList), len(instanceDigests)) + for i, instance := range instanceCopyList { + // Update instances to be edited by their `ListOperation` and + // populate necessary fields. 
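+		// (Below, a ListOpUpdate edit records the in-place result of copying an existing
+		// instance, while a ListOpAdd edit appends a newly created compression clone.)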
+ switch instance.op { + case instanceCopyCopy: + logrus.Debugf("Copying instance %s (%d/%d)", instance.sourceDigest, i+1, len(instanceCopyList)) + c.Printf("Copying image %s (%d/%d)\n", instance.sourceDigest, i+1, len(instanceCopyList)) + unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceCopyList[i].sourceDigest) + updated, err := c.copySingleImage(ctx, unparsedInstance, &instanceCopyList[i].sourceDigest, copySingleImageOptions{requireCompressionFormatMatch: instance.copyForceCompressionFormat}) + if err != nil { + return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", i+1, len(instanceCopyList), err) + } + // Record the result of a possible conversion here. + instanceEdits = append(instanceEdits, internalManifest.ListEdit{ + ListOperation: internalManifest.ListOpUpdate, + UpdateOldDigest: instance.sourceDigest, + UpdateDigest: updated.manifestDigest, + UpdateSize: int64(len(updated.manifest)), + UpdateCompressionAlgorithms: updated.compressionAlgorithms, + UpdateMediaType: updated.manifestMIMEType}) + case instanceCopyClone: + logrus.Debugf("Replicating instance %s (%d/%d)", instance.sourceDigest, i+1, len(instanceCopyList)) + c.Printf("Replicating image %s (%d/%d)\n", instance.sourceDigest, i+1, len(instanceCopyList)) + unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceCopyList[i].sourceDigest) + updated, err := c.copySingleImage(ctx, unparsedInstance, &instanceCopyList[i].sourceDigest, copySingleImageOptions{ + requireCompressionFormatMatch: true, + compressionFormat: &instance.cloneCompressionVariant.Algorithm, + compressionLevel: instance.cloneCompressionVariant.Level}) + if err != nil { + return nil, fmt.Errorf("replicating image %d/%d from manifest list: %w", i+1, len(instanceCopyList), err) + } + // Record the result of a possible conversion here. + instanceEdits = append(instanceEdits, internalManifest.ListEdit{ + ListOperation: internalManifest.ListOpAdd, + AddDigest: updated.manifestDigest, + AddSize: int64(len(updated.manifest)), + AddMediaType: updated.manifestMIMEType, + AddArtifactType: instance.cloneArtifactType, + AddPlatform: instance.clonePlatform, + AddAnnotations: instance.cloneAnnotations, + AddCompressionAlgorithms: updated.compressionAlgorithms, + }) + default: + return nil, fmt.Errorf("copying image: invalid copy operation %d", instance.op) + } + } + + // Now reset the digest/size/types of the manifests in the list to account for any conversions that we made. + if err = updatedList.EditInstances(instanceEdits); err != nil { + return nil, fmt.Errorf("updating manifest list: %w", err) + } + + // Iterate through supported list types, preferred format first. + c.Printf("Writing manifest list to image destination\n") + var errs []string + for _, thisListType := range append([]string{selectedListType}, otherManifestMIMETypeCandidates...) { + var attemptedList internalManifest.ListPublic = updatedList + + logrus.Debugf("Trying to use manifest list type %s…", thisListType) + + // Perform the list conversion, if we need one. + if thisListType != updatedList.MIMEType() { + attemptedList, err = updatedList.ConvertToMIMEType(thisListType) + if err != nil { + return nil, fmt.Errorf("converting manifest list to list with MIME type %q: %w", thisListType, err) + } + } + + // Check if the updates or a type conversion meaningfully changed the list of images + // by serializing them both so that we can compare them. 
+ attemptedManifestList, err := attemptedList.Serialize() + if err != nil { + return nil, fmt.Errorf("encoding updated manifest list (%q: %#v): %w", updatedList.MIMEType(), updatedList.Instances(), err) + } + originalManifestList, err := originalList.Serialize() + if err != nil { + return nil, fmt.Errorf("encoding original manifest list for comparison (%q: %#v): %w", originalList.MIMEType(), originalList.Instances(), err) + } + + // If we can't just use the original value, but we have to change it, flag an error. + if !bytes.Equal(attemptedManifestList, originalManifestList) { + if cannotModifyManifestListReason != "" { + return nil, fmt.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", thisListType, cannotModifyManifestListReason) + } + logrus.Debugf("Manifest list has been updated") + } else { + // We can just use the original value, so use it instead of the one we just rebuilt, so that we don't change the digest. + attemptedManifestList = manifestList + } + + // Save the manifest list. + err = c.dest.PutManifest(ctx, attemptedManifestList, nil) + if err != nil { + logrus.Debugf("Upload of manifest list type %s failed: %v", thisListType, err) + errs = append(errs, fmt.Sprintf("%s(%v)", thisListType, err)) + continue + } + errs = nil + manifestList = attemptedManifestList + break + } + if errs != nil { + return nil, fmt.Errorf("Uploading manifest list failed, attempted the following formats: %s", strings.Join(errs, ", ")) + } + + // Sign the manifest list. + newSigs, err := c.createSignatures(ctx, manifestList, c.options.SignIdentity) + if err != nil { + return nil, err + } + sigs = append(slices.Clone(sigs), newSigs...) + + c.Printf("Storing list signatures\n") + if err := c.dest.PutSignaturesWithFormat(ctx, sigs, nil); err != nil { + return nil, fmt.Errorf("writing signatures: %w", err) + } + + return manifestList, nil +} diff --git a/vendor/github.com/containers/image/v5/copy/progress_bars.go b/vendor/github.com/containers/image/v5/copy/progress_bars.go new file mode 100644 index 0000000000..59f41d2169 --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/progress_bars.go @@ -0,0 +1,177 @@ +package copy + +import ( + "context" + "fmt" + "io" + "math" + "time" + + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/types" + "github.com/vbauerster/mpb/v8" + "github.com/vbauerster/mpb/v8/decor" +) + +// newProgressPool creates a *mpb.Progress. +// The caller must eventually call pool.Wait() after the pool will no longer be updated. +// NOTE: Every progress bar created within the progress pool must either successfully +// complete or be aborted, or pool.Wait() will hang. That is typically done +// using "defer bar.Abort(false)", which must be called BEFORE pool.Wait() is called. 
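+//
+// A minimal usage sketch (hedged; variable names are illustrative, not upstream code):
+//
+//	pool := c.newProgressPool()
+//	defer pool.Wait()
+//	bar, err := c.createProgressBar(pool, false, srcInfo, "blob", "done")
+//	if err != nil {
+//		return err
+//	}
+//	defer bar.Abort(false) // runs before pool.Wait(), per the NOTE above
+//	// ... copy data, then on success:
+//	bar.mark100PercentComplete()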
+func (c *copier) newProgressPool() *mpb.Progress {
+	return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput))
+}
+
+// customPartialBlobDecorFunc implements mpb.DecorFunc for the partial blobs retrieval progress bar
+func customPartialBlobDecorFunc(s decor.Statistics) string {
+	current := decor.SizeB1024(s.Current)
+	total := decor.SizeB1024(s.Total)
+	refill := decor.SizeB1024(s.Refill)
+	if s.Total == 0 {
+		return fmt.Sprintf("%.1f / %.1f (skipped: %.1f)", current, total, refill)
+	}
+	// If we didn't do a partial fetch then let's not output a distracting ("skipped: 0.0b = 0.00%")
+	if s.Refill == 0 {
+		return fmt.Sprintf("%.1f / %.1f", current, total)
+	}
+	percentage := 100.0 * float64(s.Refill) / float64(s.Total)
+	return fmt.Sprintf("%.1f / %.1f (skipped: %.1f = %.2f%%)", current, total, refill, percentage)
+}
+
+// progressBar wraps a *mpb.Bar, allowing us to add extra state and methods.
+type progressBar struct {
+	*mpb.Bar
+	originalSize int64 // or -1 if unknown
+}
+
+// createProgressBar creates a progressBar in pool. Note that if the copier's reportWriter
+// is io.Discard, the progress bar's output will be discarded. Callers may call printCopyInfo()
+// to print a single line instead.
+//
+// NOTE: Every progress bar created within a progress pool must either successfully
+// complete or be aborted, or pool.Wait() will hang. That is typically done
+// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called.
+//
+// As a convention, most users of progress bars should call mark100PercentComplete on full success;
+// we don't leave progress bars in a partial state when fully done
+// (even if we copied much less data than anticipated).
+func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) (*progressBar, error) {
+	// shortDigestLen is the length of the digest used for blobs.
+	const shortDigestLen = 12
+
+	if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
+		return nil, err
+	}
+	prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
+	// Truncate the prefix (chopping off part of the digest) to make all progress bars aligned in a column.
+	maxPrefixLen := len("Copying blob ") + shortDigestLen
+	if len(prefix) > maxPrefixLen {
+		prefix = prefix[:maxPrefixLen]
+	}
+
+	// onComplete will replace prefix once the bar/spinner has completed
+	onComplete = prefix + " " + onComplete
+
+	// Use a normal progress bar when we know the size (i.e., size > 0).
+	// Otherwise, use a spinner to indicate that something's happening.
+	var bar *mpb.Bar
+	if info.Size > 0 {
+		if partial {
+			bar = pool.AddBar(info.Size,
+				mpb.BarFillerClearOnComplete(),
+				mpb.PrependDecorators(
+					decor.OnComplete(decor.Name(prefix), onComplete),
+				),
+				mpb.AppendDecorators(
+					decor.Any(customPartialBlobDecorFunc),
+				),
+			)
+		} else {
+			bar = pool.AddBar(info.Size,
+				mpb.BarFillerClearOnComplete(),
+				mpb.PrependDecorators(
+					decor.OnComplete(decor.Name(prefix), onComplete),
+				),
+				mpb.AppendDecorators(
+					decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
+					decor.Name(" | "),
+					decor.OnComplete(decor.EwmaSpeed(decor.SizeB1024(0), "% .1f", 30), ""),
+				),
+			)
+		}
+	} else {
+		bar = pool.New(0,
+			mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft(),
+			mpb.BarFillerClearOnComplete(),
+			mpb.PrependDecorators(
+				decor.OnComplete(decor.Name(prefix), onComplete),
+			),
+			mpb.AppendDecorators(
+				decor.OnComplete(decor.EwmaSpeed(decor.SizeB1024(0), "% .1f", 30), ""),
+			),
+		)
+	}
+	return &progressBar{
+		Bar:          bar,
+		originalSize: info.Size,
+	}, nil
+}
+
+// printCopyInfo prints a "Copying ..." message on the copier if the output is
+// set to `io.Discard`. In that case, the progress bars won't be rendered but
+// we still want to indicate when blobs and configs are copied.
+func (c *copier) printCopyInfo(kind string, info types.BlobInfo) {
+	if c.progressOutput == io.Discard {
+		c.Printf("Copying %s %s\n", kind, info.Digest)
+	}
+}
+
+// mark100PercentComplete marks the progress bar as 100% complete,
+// advancing the current state to the known total if necessary.
+func (bar *progressBar) mark100PercentComplete() {
+	if bar.originalSize > 0 {
+		// We can't call bar.SetTotal even if we wanted to; the total cannot be changed
+		// after a progress bar is created with a definite total.
+		bar.SetCurrent(bar.originalSize) // This triggers the completion condition.
+	} else {
+		// -1 = unknown size
+		// 0 is somewhat of a special case: Unlike c/image, where 0 is a definite known
+		// size (possible at least in theory), in mpb, zero-sized progress bars are treated
+		// as unknown size, in particular they are not configured to be marked as
+		// complete on bar.Current() reaching bar.total (because that would happen already
+		// when creating the progress bar).
+		// That means that we are both _allowed_ to call SetTotal, and we _have to_.
+		bar.SetTotal(-1, true) // total < 0 = set it to bar.Current(), report it; and mark the bar as complete.
+	}
+}
+
+// blobChunkAccessorProxy wraps a BlobChunkAccessor and updates a *progressBar
+// with the number of received bytes.
+type blobChunkAccessorProxy struct {
+	wrapped private.BlobChunkAccessor // The underlying BlobChunkAccessor
+	bar     *progressBar              // A progress bar updated with the number of bytes read so far
+}
+
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
+// The specified chunks must be not overlapping and sorted by their offset.
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+// If the Length for the last chunk is set to math.MaxUint64, then it
+// fully fetches the remaining data from the offset to the end of the blob.
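+//
+// For example (hypothetical values): fetching the first KiB and then the rest of a blob
+// could be requested as
+//
+//	chunks := []private.ImageSourceChunk{
+//		{Offset: 0, Length: 1024},
+//		{Offset: 1024, Length: math.MaxUint64},
+//	}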
+func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { + start := time.Now() + rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks) + if err == nil { + total := int64(0) + for _, c := range chunks { + // do not update the progress bar if there is a chunk with unknown length. + if c.Length == math.MaxUint64 { + return rc, errs, err + } + total += int64(c.Length) + } + s.bar.EwmaIncrInt64(total, time.Since(start)) + } + return rc, errs, err +} diff --git a/vendor/github.com/containers/image/v5/copy/progress_channel.go b/vendor/github.com/containers/image/v5/copy/progress_channel.go new file mode 100644 index 0000000000..d5e9e09bda --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/progress_channel.go @@ -0,0 +1,79 @@ +package copy + +import ( + "io" + "time" + + "github.com/containers/image/v5/types" +) + +// progressReader is a reader that reports its progress to a types.ProgressProperties channel on an interval. +type progressReader struct { + source io.Reader + channel chan<- types.ProgressProperties + interval time.Duration + artifact types.BlobInfo + lastUpdate time.Time + offset uint64 + offsetUpdate uint64 +} + +// newProgressReader creates a new progress reader for: +// `source`: The source when internally reading bytes +// `channel`: The reporter channel to which the progress will be sent +// `interval`: The update interval to indicate how often the progress should update +// `artifact`: The blob metadata which is currently being progressed +func newProgressReader( + source io.Reader, + channel chan<- types.ProgressProperties, + interval time.Duration, + artifact types.BlobInfo, +) *progressReader { + // The progress reader constructor informs the progress channel + // that a new artifact will be read + channel <- types.ProgressProperties{ + Event: types.ProgressEventNewArtifact, + Artifact: artifact, + } + return &progressReader{ + source: source, + channel: channel, + interval: interval, + artifact: artifact, + lastUpdate: time.Now(), + offset: 0, + offsetUpdate: 0, + } +} + +// reportDone indicates to the internal channel that the progress has been +// finished +func (r *progressReader) reportDone() { + r.channel <- types.ProgressProperties{ + Event: types.ProgressEventDone, + Artifact: r.artifact, + Offset: r.offset, + OffsetUpdate: r.offsetUpdate, + } +} + +// Read continuously reads bytes into the progress reader and reports the +// status via the internal channel +func (r *progressReader) Read(p []byte) (int, error) { + n, err := r.source.Read(p) + r.offset += uint64(n) + r.offsetUpdate += uint64(n) + + // Fire the progress reader in the provided interval + if time.Since(r.lastUpdate) > r.interval { + r.channel <- types.ProgressProperties{ + Event: types.ProgressEventRead, + Artifact: r.artifact, + Offset: r.offset, + OffsetUpdate: r.offsetUpdate, + } + r.lastUpdate = time.Now() + r.offsetUpdate = 0 + } + return n, err +} diff --git a/vendor/github.com/containers/image/v5/copy/sign.go b/vendor/github.com/containers/image/v5/copy/sign.go new file mode 100644 index 0000000000..7ddfe917bb --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/sign.go @@ -0,0 +1,115 @@ +package copy + +import ( + "context" + "fmt" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/private" + internalsig "github.com/containers/image/v5/internal/signature" + internalSigner "github.com/containers/image/v5/internal/signer" + 
"github.com/containers/image/v5/signature/sigstore" + "github.com/containers/image/v5/signature/simplesigning" + "github.com/containers/image/v5/transports" +) + +// setupSigners initializes c.signers. +func (c *copier) setupSigners() error { + c.signers = append(c.signers, c.options.Signers...) + // c.signersToClose is intentionally not updated with c.options.Signers. + + // We immediately append created signers to c.signers, and we rely on c.close() to clean them up; so we don’t need + // to clean up any created signers on failure. + + if c.options.SignBy != "" { + opts := []simplesigning.Option{ + simplesigning.WithKeyFingerprint(c.options.SignBy), + } + if c.options.SignPassphrase != "" { + opts = append(opts, simplesigning.WithPassphrase(c.options.SignPassphrase)) + } + signer, err := simplesigning.NewSigner(opts...) + if err != nil { + return err + } + c.signers = append(c.signers, signer) + c.signersToClose = append(c.signersToClose, signer) + } + + if c.options.SignBySigstorePrivateKeyFile != "" { + signer, err := sigstore.NewSigner( + sigstore.WithPrivateKeyFile(c.options.SignBySigstorePrivateKeyFile, c.options.SignSigstorePrivateKeyPassphrase), + ) + if err != nil { + return err + } + c.signers = append(c.signers, signer) + c.signersToClose = append(c.signersToClose, signer) + } + + return nil +} + +// sourceSignatures returns signatures from unparsedSource, +// and verifies that they can be used (to avoid copying a large image when we +// can tell in advance that it would ultimately fail) +func (c *copier) sourceSignatures(ctx context.Context, unparsed private.UnparsedImage, + gettingSignaturesMessage, checkingDestMessage string) ([]internalsig.Signature, error) { + var sigs []internalsig.Signature + if c.options.RemoveSignatures { + sigs = []internalsig.Signature{} + } else { + c.Printf("%s\n", gettingSignaturesMessage) + s, err := unparsed.UntrustedSignatures(ctx) + if err != nil { + return nil, fmt.Errorf("reading signatures: %w", err) + } + sigs = s + } + if len(sigs) != 0 { + c.Printf("%s\n", checkingDestMessage) + if err := c.dest.SupportsSignatures(ctx); err != nil { + return nil, fmt.Errorf("Can not copy signatures to %s: %w", transports.ImageName(c.dest.Reference()), err) + } + } + return sigs, nil +} + +// createSignatures creates signatures for manifest and an optional identity. +func (c *copier) createSignatures(ctx context.Context, manifest []byte, identity reference.Named) ([]internalsig.Signature, error) { + if len(c.signers) == 0 { + // We must exit early here, otherwise copies with no Docker reference wouldn’t be possible. 
+ return nil, nil + } + + if identity != nil { + if reference.IsNameOnly(identity) { + return nil, fmt.Errorf("Sign identity must be a fully specified reference %s", identity.String()) + } + } else { + identity = c.dest.Reference().DockerReference() + if identity == nil { + return nil, fmt.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference())) + } + } + + res := make([]internalsig.Signature, 0, len(c.signers)) + for signerIndex, signer := range c.signers { + msg := internalSigner.ProgressMessage(signer) + if len(c.signers) == 1 { + c.Printf("Creating signature: %s\n", msg) + } else { + c.Printf("Creating signature %d: %s\n", signerIndex+1, msg) + } + newSig, err := internalSigner.SignImageManifest(ctx, signer, manifest, identity) + if err != nil { + if len(c.signers) == 1 { + return nil, fmt.Errorf("creating signature: %w", err) + } else { + return nil, fmt.Errorf("creating signature %d: %w", signerIndex+1, err) + } + } + res = append(res, newSig) + } + return res, nil +} diff --git a/vendor/github.com/containers/image/v5/copy/single.go b/vendor/github.com/containers/image/v5/copy/single.go new file mode 100644 index 0000000000..f7aca8381d --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/single.go @@ -0,0 +1,1003 @@ +package copy + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "iter" + "maps" + "reflect" + "slices" + "strings" + "sync" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/image" + "github.com/containers/image/v5/internal/pkg/platform" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/set" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" + compressiontypes "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + chunkedToc "github.com/containers/storage/pkg/chunked/toc" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" + "github.com/vbauerster/mpb/v8" +) + +// imageCopier tracks state specific to a single image (possibly an item of a manifest list) +type imageCopier struct { + c *copier + manifestUpdates *types.ManifestUpdateOptions + src *image.SourcedImage + manifestConversionPlan manifestConversionPlan + diffIDsAreNeeded bool + cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can + canSubstituteBlobs bool + compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil. + compressionLevel *int + requireCompressionFormatMatch bool +} + +type copySingleImageOptions struct { + requireCompressionFormatMatch bool + compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil. + compressionLevel *int +} + +// copySingleImageResult carries data produced by copySingleImage +type copySingleImageResult struct { + manifest []byte + manifestMIMEType string + manifestDigest digest.Digest + compressionAlgorithms []compressiontypes.Algorithm +} + +// copySingleImage copies a single (non-manifest-list) image unparsedImage, using c.policyContext to validate +// source image admissibility. 
+func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.UnparsedImage, targetInstance *digest.Digest, opts copySingleImageOptions) (copySingleImageResult, error) { + // The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list. + // Make sure we fail cleanly in such cases. + multiImage, err := isMultiImage(ctx, unparsedImage) + if err != nil { + // FIXME FIXME: How to name a reference for the sub-image? + return copySingleImageResult{}, fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(unparsedImage.Reference()), err) + } + if multiImage { + return copySingleImageResult{}, fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image") + } + + // Please keep this policy check BEFORE reading any other information about the image. + // (The multiImage check above only matches the MIME type, which we have received anyway. + // Actual parsing of anything should be deferred.) + if allowed, err := c.policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so. + return copySingleImageResult{}, fmt.Errorf("Source image rejected: %w", err) + } + src, err := image.FromUnparsedImage(ctx, c.options.SourceCtx, unparsedImage) + if err != nil { + return copySingleImageResult{}, fmt.Errorf("initializing image from source %s: %w", transports.ImageName(c.rawSource.Reference()), err) + } + + // If the destination is a digested reference, make a note of that, determine what digest value we're + // expecting, and check that the source manifest matches it. If the source manifest doesn't, but it's + // one item from a manifest list that matches it, accept that as a match. + destIsDigestedReference := false + if named := c.dest.Reference().DockerReference(); named != nil { + if digested, ok := named.(reference.Digested); ok { + destIsDigestedReference = true + matches, err := manifest.MatchesDigest(src.ManifestBlob, digested.Digest()) + if err != nil { + return copySingleImageResult{}, fmt.Errorf("computing digest of source image's manifest: %w", err) + } + if !matches { + manifestList, _, err := c.unparsedToplevel.Manifest(ctx) + if err != nil { + return copySingleImageResult{}, fmt.Errorf("reading manifest from source image: %w", err) + } + matches, err = manifest.MatchesDigest(manifestList, digested.Digest()) + if err != nil { + return copySingleImageResult{}, fmt.Errorf("computing digest of source image's manifest: %w", err) + } + if !matches { + return copySingleImageResult{}, errors.New("Digest of source image's manifest would not match destination reference") + } + } + } + } + + if err := prepareImageConfigForDest(ctx, c.options.DestinationCtx, src, c.dest); err != nil { + return copySingleImageResult{}, err + } + + sigs, err := c.sourceSignatures(ctx, src, + "Getting image source signatures", + "Checking if image destination supports signatures") + if err != nil { + return copySingleImageResult{}, err + } + + // Determine if we're allowed to modify the manifest. + // If we can, set to the empty string. If we can't, set to the reason why. + // Compare, and perhaps keep in sync with, the version in copyMultipleImages. 
+ cannotModifyManifestReason := "" + if len(sigs) > 0 { + cannotModifyManifestReason = "Would invalidate signatures" + } + if destIsDigestedReference { + cannotModifyManifestReason = "Destination specifies a digest" + } + if c.options.PreserveDigests { + cannotModifyManifestReason = "Instructed to preserve digests" + } + + ic := imageCopier{ + c: c, + manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}}, + src: src, + // manifestConversionPlan and diffIDsAreNeeded are computed later + cannotModifyManifestReason: cannotModifyManifestReason, + requireCompressionFormatMatch: opts.requireCompressionFormatMatch, + } + if opts.compressionFormat != nil { + ic.compressionFormat = opts.compressionFormat + ic.compressionLevel = opts.compressionLevel + } else if c.options.DestinationCtx != nil { + // Note that compressionFormat and compressionLevel can be nil. + ic.compressionFormat = c.options.DestinationCtx.CompressionFormat + ic.compressionLevel = c.options.DestinationCtx.CompressionLevel + } + // HACK: Don’t combine zstd:chunked and encryption. + // zstd:chunked can only usefully be consumed using range requests of parts of the layer, which would require the encryption + // to support decrypting arbitrary subsets of the stream. That’s plausible but not supported using the encryption API we have. + // Also, the chunked metadata is exposed in annotations unencrypted, which reveals the TOC digest = layer identity without + // encryption. (That can be determined from the unencrypted config anyway, but, still...) + // + // Ideally this should query a well-defined property of the compression algorithm (and $somehow determine the right fallback) instead of + // hard-coding zstd:chunked / zstd. + if ic.c.options.OciEncryptLayers != nil { + format := ic.compressionFormat + if format == nil { + format = defaultCompressionFormat + } + if format.Name() == compressiontypes.ZstdChunkedAlgorithmName { + if ic.requireCompressionFormatMatch { + return copySingleImageResult{}, errors.New("explicitly requested to combine zstd:chunked with encryption, which is not beneficial; use plain zstd instead") + } + logrus.Warnf("Compression using zstd:chunked is not beneficial for encrypted layers, using plain zstd instead") + ic.compressionFormat = &compression.Zstd + } + } + + // Decide whether we can substitute blobs with semantic equivalents: + // - Don’t do that if we can’t modify the manifest at all + // - Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it. + // This may be too conservative, but for now, better safe than sorry, _especially_ on the len(c.signers) != 0 path: + // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended. + // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk + // that the compressed version coming from a third party may be designed to attack some other decompressor implementation, + // and we would reuse and sign it. 
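+	// In short: blob substitution is allowed only when we neither preserve existing
+	// signatures nor create new ones.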
+ ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && len(c.signers) == 0 + + if err := ic.updateEmbeddedDockerReference(); err != nil { + return copySingleImageResult{}, err + } + + destRequiresOciEncryption := (isEncrypted(src) && ic.c.options.OciDecryptConfig == nil) || c.options.OciEncryptLayers != nil + + ic.manifestConversionPlan, err = determineManifestConversion(determineManifestConversionInputs{ + srcMIMEType: ic.src.ManifestMIMEType, + destSupportedManifestMIMETypes: ic.c.dest.SupportedManifestMIMETypes(), + forceManifestMIMEType: c.options.ForceManifestMIMEType, + requestedCompressionFormat: ic.compressionFormat, + requiresOCIEncryption: destRequiresOciEncryption, + cannotModifyManifestReason: ic.cannotModifyManifestReason, + }) + if err != nil { + return copySingleImageResult{}, err + } + // We set up this part of ic.manifestUpdates quite early, not just around the + // code that calls copyUpdatedConfigAndManifest, so that other parts of the copy code + // (e.g. the UpdatedImageNeedsLayerDiffIDs check just below) can make decisions based + // on the expected destination format. + if ic.manifestConversionPlan.preferredMIMETypeNeedsConversion { + ic.manifestUpdates.ManifestMIMEType = ic.manifestConversionPlan.preferredMIMEType + } + + // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here. + ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) + + // If enabled, fetch and compare the destination's manifest. And as an optimization skip updating the destination iff equal + if c.options.OptimizeDestinationImageAlreadyExists { + shouldUpdateSigs := len(sigs) > 0 || len(c.signers) != 0 // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible + noPendingManifestUpdates := ic.noPendingManifestUpdates() + + logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t, compression match required for reusing blobs=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates, opts.requireCompressionFormatMatch) + if !shouldUpdateSigs && !destRequiresOciEncryption && noPendingManifestUpdates && !ic.requireCompressionFormatMatch { + matchedResult, err := ic.compareImageDestinationManifestEqual(ctx, targetInstance) + if err != nil { + logrus.Warnf("Failed to compare destination image manifest: %v", err) + return copySingleImageResult{}, err + } + + if matchedResult != nil { + c.Printf("Skipping: image already present at destination\n") + return *matchedResult, nil + } + } + } + + compressionAlgos, err := ic.copyLayers(ctx) + if err != nil { + return copySingleImageResult{}, err + } + + // With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only; + // and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support + // without actually trying to upload something and getting a types.ManifestTypeRejectedError. + // So, try the preferred manifest MIME type with possibly-updated blob digests, media types, and sizes if + // we're altering how they're compressed. 
If the process succeeds, fine… + manifestBytes, manifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance) + wipResult := copySingleImageResult{ + manifest: manifestBytes, + manifestMIMEType: ic.manifestConversionPlan.preferredMIMEType, + manifestDigest: manifestDigest, + } + if err != nil { + logrus.Debugf("Writing manifest using preferred type %s failed: %v", ic.manifestConversionPlan.preferredMIMEType, err) + // … if it fails, and the failure is either because the manifest is rejected by the registry, or + // because we failed to create a manifest of the specified type because the specific manifest type + // doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may + // have other options available that could still succeed. + var manifestTypeRejectedError types.ManifestTypeRejectedError + var manifestLayerCompressionIncompatibilityError manifest.ManifestLayerCompressionIncompatibilityError + isManifestRejected := errors.As(err, &manifestTypeRejectedError) + isCompressionIncompatible := errors.As(err, &manifestLayerCompressionIncompatibilityError) + if (!isManifestRejected && !isCompressionIncompatible) || len(ic.manifestConversionPlan.otherMIMETypeCandidates) == 0 { + // We don’t have other options. + // In principle the code below would handle this as well, but the resulting error message is fairly ugly. + // Don’t bother the user with MIME types if we have no choice. + return copySingleImageResult{}, err + } + // If the original MIME type is acceptable, determineManifestConversion always uses it as ic.manifestConversionPlan.preferredMIMEType. + // So if we are here, we will definitely be trying to convert the manifest. + // With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason, + // so let’s bail out early and with a better error message. + if ic.cannotModifyManifestReason != "" { + return copySingleImageResult{}, fmt.Errorf("writing manifest failed and we cannot try conversions: %q: %w", cannotModifyManifestReason, err) + } + + // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil. + errs := []string{fmt.Sprintf("%s(%v)", ic.manifestConversionPlan.preferredMIMEType, err)} + for _, manifestMIMEType := range ic.manifestConversionPlan.otherMIMETypeCandidates { + logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType) + ic.manifestUpdates.ManifestMIMEType = manifestMIMEType + attemptedManifest, attemptedManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance) + if err != nil { + logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err) + errs = append(errs, fmt.Sprintf("%s(%v)", manifestMIMEType, err)) + continue + } + + // We have successfully uploaded a manifest. + wipResult = copySingleImageResult{ + manifest: attemptedManifest, + manifestMIMEType: manifestMIMEType, + manifestDigest: attemptedManifestDigest, + } + errs = nil // Mark this as a success so that we don't abort below. + break + } + if errs != nil { + return copySingleImageResult{}, fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", ")) + } + } + if targetInstance != nil { + targetInstance = &wipResult.manifestDigest + } + + newSigs, err := c.createSignatures(ctx, wipResult.manifest, c.options.SignIdentity) + if err != nil { + return copySingleImageResult{}, err + } + sigs = append(slices.Clone(sigs), newSigs...) 
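+	// (slices.Clone keeps this append from mutating the array backing the slice that
+	// sourceSignatures returned.)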
+ + if len(sigs) > 0 { + c.Printf("Storing signatures\n") + if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil { + return copySingleImageResult{}, fmt.Errorf("writing signatures: %w", err) + } + } + wipResult.compressionAlgorithms = compressionAlgos + res := wipResult // We are done + return res, nil +} + +// prepareImageConfigForDest enforces dest.MustMatchRuntimeOS and handles dest.NoteOriginalOCIConfig, if necessary. +func prepareImageConfigForDest(ctx context.Context, sys *types.SystemContext, src types.Image, dest private.ImageDestination) error { + ociConfig, configErr := src.OCIConfig(ctx) + // Do not fail on configErr here, this might be an artifact + // and maybe nothing needs this to be a container image and to process the config. + + if dest.MustMatchRuntimeOS() { + if configErr != nil { + return fmt.Errorf("parsing image configuration: %w", configErr) + } + wantedPlatforms := platform.WantedPlatforms(sys) + + if !slices.ContainsFunc(wantedPlatforms, func(wantedPlatform imgspecv1.Platform) bool { + // For a transitional period, this might trigger warnings because the Variant + // field was added to OCI config only recently. If this turns out to be too noisy, + // revert this check to only look for (OS, Architecture). + return platform.MatchesPlatform(ociConfig.Platform, wantedPlatform) + }) { + options := newOrderedSet() + for _, p := range wantedPlatforms { + options.append(fmt.Sprintf("%s+%s+%q", p.OS, p.Architecture, p.Variant)) + } + logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q+%q, expecting one of %q", + ociConfig.OS, ociConfig.Architecture, ociConfig.Variant, strings.Join(options.list, ", ")) + } + } + + if err := dest.NoteOriginalOCIConfig(ociConfig, configErr); err != nil { + return err + } + + return nil +} + +// updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests. +func (ic *imageCopier) updateEmbeddedDockerReference() error { + if ic.c.dest.IgnoresEmbeddedDockerReference() { + return nil // Destination would prefer us not to update the embedded reference. + } + destRef := ic.c.dest.Reference().DockerReference() + if destRef == nil { + return nil // Destination does not care about Docker references + } + if !ic.src.EmbeddedDockerReferenceConflicts(destRef) { + return nil // No reference embedded in the manifest, or it matches destRef already. + } + + if ic.cannotModifyManifestReason != "" { + return fmt.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would change the manifest, which we cannot do: %q", + transports.ImageName(ic.c.dest.Reference()), destRef.String(), ic.cannotModifyManifestReason) + } + ic.manifestUpdates.EmbeddedDockerReference = destRef + return nil +} + +func (ic *imageCopier) noPendingManifestUpdates() bool { + return reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly}) +} + +// compareImageDestinationManifestEqual compares the source and destination image manifests (reading the manifest from the +// (possibly remote) destination). If they are equal, it returns a full copySingleImageResult, nil otherwise. 
+func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context, targetInstance *digest.Digest) (*copySingleImageResult, error) { + srcManifestDigest, err := manifest.Digest(ic.src.ManifestBlob) + if err != nil { + return nil, fmt.Errorf("calculating manifest digest: %w", err) + } + + destImageSource, err := ic.c.dest.Reference().NewImageSource(ctx, ic.c.options.DestinationCtx) + if err != nil { + logrus.Debugf("Unable to create destination image %s source: %v", ic.c.dest.Reference(), err) + return nil, nil + } + defer destImageSource.Close() + + destManifest, destManifestType, err := destImageSource.GetManifest(ctx, targetInstance) + if err != nil { + logrus.Debugf("Unable to get destination image %s/%s manifest: %v", destImageSource, targetInstance, err) + return nil, nil + } + + destManifestDigest, err := manifest.Digest(destManifest) + if err != nil { + return nil, fmt.Errorf("calculating manifest digest: %w", err) + } + + logrus.Debugf("Comparing source and destination manifest digests: %v vs. %v", srcManifestDigest, destManifestDigest) + if srcManifestDigest != destManifestDigest { + return nil, nil + } + + compressionAlgos := set.New[string]() + for _, srcInfo := range ic.src.LayerInfos() { + _, c, err := compressionEditsFromBlobInfo(srcInfo) + if err != nil { + return nil, err + } + if c != nil { + compressionAlgos.Add(c.Name()) + } + } + + algos, err := algorithmsByNames(compressionAlgos.All()) + if err != nil { + return nil, err + } + + // Destination and source manifests, types and digests should all be equivalent + return &copySingleImageResult{ + manifest: destManifest, + manifestMIMEType: destManifestType, + manifestDigest: srcManifestDigest, + compressionAlgorithms: algos, + }, nil +} + +// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.cannotModifyManifestReason == "". +func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algorithm, error) { + srcInfos := ic.src.LayerInfos() + updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx) + if err != nil { + return nil, err + } + srcInfosUpdated := false + if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) { + if ic.cannotModifyManifestReason != "" { + return nil, fmt.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason) + } + srcInfos = updatedSrcInfos + srcInfosUpdated = true + } + + type copyLayerData struct { + destInfo types.BlobInfo + diffID digest.Digest + err error + } + + // The manifest is used to extract the information whether a given + // layer is empty. + man, err := manifest.FromBlob(ic.src.ManifestBlob, ic.src.ManifestMIMEType) + if err != nil { + return nil, err + } + manifestLayerInfos := man.LayerInfos() + + // copyGroup is used to determine if all layers are copied + copyGroup := sync.WaitGroup{} + + data := make([]copyLayerData, len(srcInfos)) + copyLayerHelper := func(index int, srcLayer types.BlobInfo, toEncrypt bool, pool *mpb.Progress, srcRef reference.Named) { + defer ic.c.concurrentBlobCopiesSemaphore.Release(1) + defer copyGroup.Done() + cld := copyLayerData{} + if !ic.c.options.DownloadForeignLayers && ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { + // DiffIDs are, currently, needed only when converting from schema1. + // In which case src.LayerInfos will not have URLs because schema1 + // does not support them.
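+ // (Such URL-backed foreign layers, e.g. Windows base layers, are recorded in the manifest as-is rather than copied.)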
+ if ic.diffIDsAreNeeded { + cld.err = errors.New("getting DiffID for foreign layers is unimplemented") + } else { + cld.destInfo = srcLayer + logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name()) + } + } else { + cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, toEncrypt, pool, index, srcRef, manifestLayerInfos[index].EmptyLayer) + } + data[index] = cld + } + + // Decide which layers to encrypt + layersToEncrypt := set.New[int]() + if ic.c.options.OciEncryptLayers != nil { + totalLayers := len(srcInfos) + for _, l := range *ic.c.options.OciEncryptLayers { + switch { + case l >= 0 && l < totalLayers: + layersToEncrypt.Add(l) + case l < 0 && l+totalLayers >= 0: // Implies (l + totalLayers) < totalLayers + layersToEncrypt.Add(l + totalLayers) // If l is negative, it is reverse indexed. + default: + return nil, fmt.Errorf("when choosing layers to encrypt, layer index %d out of range (%d layers exist)", l, totalLayers) + } + } + + if len(*ic.c.options.OciEncryptLayers) == 0 { // “encrypt all layers” + for i := 0; i < len(srcInfos); i++ { + layersToEncrypt.Add(i) + } + } + } + + if err := func() error { // A scope for defer + progressPool := ic.c.newProgressPool() + defer progressPool.Wait() + + // Ensure we wait for all layers to be copied. progressPool.Wait() must not be called while any of the copyLayerHelpers interact with the progressPool. + defer copyGroup.Wait() + + for i, srcLayer := range srcInfos { + if err := ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil { + // This can only fail with ctx.Err(), so no need to blame acquiring the semaphore. + return fmt.Errorf("copying layer: %w", err) + } + copyGroup.Add(1) + go copyLayerHelper(i, srcLayer, layersToEncrypt.Contains(i), progressPool, ic.c.rawSource.Reference().DockerReference()) + } + + // A call to copyGroup.Wait() is done at this point by the defer above. + return nil + }(); err != nil { + return nil, err + } + + compressionAlgos := set.New[string]() + destInfos := make([]types.BlobInfo, len(srcInfos)) + diffIDs := make([]digest.Digest, len(srcInfos)) + for i, cld := range data { + if cld.err != nil { + return nil, cld.err + } + if cld.destInfo.CompressionAlgorithm != nil { + compressionAlgos.Add(cld.destInfo.CompressionAlgorithm.Name()) + } + destInfos[i] = cld.destInfo + diffIDs[i] = cld.diffID + } + + // WARNING: If you are adding new reasons to change ic.manifestUpdates, also update the + // OptimizeDestinationImageAlreadyExists short-circuit conditions + ic.manifestUpdates.InformationOnly.LayerInfos = destInfos + if ic.diffIDsAreNeeded { + ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs + } + if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) { + ic.manifestUpdates.LayerInfos = destInfos + } + algos, err := algorithmsByNames(compressionAlgos.All()) + if err != nil { + return nil, err + } + return algos, nil +} + +// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possible other fields) +func layerDigestsDiffer(a, b []types.BlobInfo) bool { + return !slices.EqualFunc(a, b, func(a, b types.BlobInfo) bool { + return a.Digest == b.Digest + }) +} + +// copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary, +// stores the resulting config and manifest to the destination, and returns the stored manifest +// and its digest. 
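+// If instanceDigest is non-nil, the manifest is stored as that instance of a manifest list, under the digest of the newly-written manifest.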
+func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, digest.Digest, error) { + var pendingImage types.Image = ic.src + if !ic.noPendingManifestUpdates() { + if ic.cannotModifyManifestReason != "" { + return nil, "", fmt.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden: %q", ic.cannotModifyManifestReason) + } + if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) { + // We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion. + // So, this can only happen if we are trying to upload using one of the other MIME type candidates. + // Because UpdatedImageNeedsLayerDiffIDs is true only when converting from s1 to s2, this case should only arise + // when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2. + // Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now. + // If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates. + return nil, "", fmt.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType) + } + pi, err := ic.src.UpdatedImage(ctx, *ic.manifestUpdates) + if err != nil { + return nil, "", fmt.Errorf("creating an updated image manifest: %w", err) + } + pendingImage = pi + } + man, _, err := pendingImage.Manifest(ctx) + if err != nil { + return nil, "", fmt.Errorf("reading manifest: %w", err) + } + + if err := ic.copyConfig(ctx, pendingImage); err != nil { + return nil, "", err + } + + ic.c.Printf("Writing manifest to image destination\n") + manifestDigest, err := manifest.Digest(man) + if err != nil { + return nil, "", err + } + if instanceDigest != nil { + instanceDigest = &manifestDigest + } + if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil { + logrus.Debugf("Error %v while writing manifest %q", err, string(man)) + return nil, "", fmt.Errorf("writing manifest: %w", err) + } + return man, manifestDigest, nil +} + +// copyConfig copies config.json, if any, from src to dest. +func (ic *imageCopier) copyConfig(ctx context.Context, src types.Image) error { + srcInfo := src.ConfigInfo() + if srcInfo.Digest != "" { + if err := ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil { + // This can only fail with ctx.Err(), so no need to blame acquiring the semaphore. 
+ return fmt.Errorf("copying config: %w", err) + } + defer ic.c.concurrentBlobCopiesSemaphore.Release(1) + + destInfo, err := func() (types.BlobInfo, error) { // A scope for defer + progressPool := ic.c.newProgressPool() + defer progressPool.Wait() + bar, err := ic.c.createProgressBar(progressPool, false, srcInfo, "config", "done") + if err != nil { + return types.BlobInfo{}, err + } + defer bar.Abort(false) + ic.c.printCopyInfo("config", srcInfo) + + configBlob, err := src.ConfigBlob(ctx) + if err != nil { + return types.BlobInfo{}, fmt.Errorf("reading config blob %s: %w", srcInfo.Digest, err) + } + + destInfo, err := ic.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, true, false, bar, -1, false) + if err != nil { + return types.BlobInfo{}, err + } + + bar.mark100PercentComplete() + return destInfo, nil + }() + if err != nil { + return err + } + if destInfo.Digest != srcInfo.Digest { + return fmt.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest) + } + } + return nil +} + +// diffIDResult contains both a digest value and an error from diffIDComputationGoroutine. +// We could also send the error through the pipeReader, but this more cleanly separates the copying of the layer and the DiffID computation. +type diffIDResult struct { + digest digest.Digest + err error +} + +// compressionEditsFromBlobInfo returns a (CompressionOperation, CompressionAlgorithm) value pair suitable +// for types.BlobInfo. +func compressionEditsFromBlobInfo(srcInfo types.BlobInfo) (types.LayerCompression, *compressiontypes.Algorithm, error) { + // This MIME type → compression mapping belongs in manifest-specific code in our manifest + // package (but we should preferably replace/change UpdatedImage instead of productizing + // this workaround). + switch srcInfo.MediaType { + case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip: + return types.PreserveOriginal, &compression.Gzip, nil + case imgspecv1.MediaTypeImageLayerZstd: + tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations) + if err != nil { + return types.PreserveOriginal, nil, err + } + if tocDigest != nil { + return types.PreserveOriginal, &compression.ZstdChunked, nil + } + return types.PreserveOriginal, &compression.Zstd, nil + case manifest.DockerV2SchemaLayerMediaTypeUncompressed, imgspecv1.MediaTypeImageLayer: + return types.Decompress, nil, nil + default: + return types.PreserveOriginal, nil, nil + } +} + +// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it, +// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded +// srcRef can be used as an additional hint to the destination during checking whether a layer can be reused but srcRef can be nil. +func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress, layerIndex int, srcRef reference.Named, emptyLayer bool) (types.BlobInfo, digest.Digest, error) { + // If the srcInfo doesn't contain compression information, try to compute it from the + // MediaType, which was either read from a manifest by way of LayerInfos() or constructed + // by LayerInfosForCopy(), if it was supplied at all. If we succeed in copying the blob, + // the BlobInfo we return will be passed to UpdatedImage() and then to UpdateLayerInfos(), + // which uses the compression information to compute the updated MediaType values. 
+ // (Sadly UpdatedImage() is documented to not update MediaTypes from + // ManifestUpdateOptions.LayerInfos[].MediaType, so we are doing it indirectly.) + if srcInfo.CompressionOperation == types.PreserveOriginal && srcInfo.CompressionAlgorithm == nil { + op, algo, err := compressionEditsFromBlobInfo(srcInfo) + if err != nil { + return types.BlobInfo{}, "", err + } + srcInfo.CompressionOperation = op + srcInfo.CompressionAlgorithm = algo + } + + ic.c.printCopyInfo("blob", srcInfo) + + diffIDIsNeeded := false + var cachedDiffID digest.Digest = "" + if ic.diffIDsAreNeeded { + cachedDiffID = ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be "" + diffIDIsNeeded = cachedDiffID == "" + } + // When encrypting or decrypting, only use the simple code path. We might be able to optimize more + // (e.g. if we know the DiffID of an encrypted compressed layer, it might not be necessary to pull, decrypt and decompress again), + // but it’s not trivially safe to do such things, so until someone takes the effort to make a comprehensive argument, let’s not. + encryptingOrDecrypting := toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.options.OciDecryptConfig != nil) + canAvoidProcessingCompleteLayer := !diffIDIsNeeded && !encryptingOrDecrypting + + // Don’t read the layer from the source if we already have the blob, and optimizations are acceptable. + if canAvoidProcessingCompleteLayer { + canChangeLayerCompression := ic.src.CanChangeLayerCompression(srcInfo.MediaType) + logrus.Debugf("Checking if we can reuse blob %s: general substitution = %v, compression for MIME type %q = %v", + srcInfo.Digest, ic.canSubstituteBlobs, srcInfo.MediaType, canChangeLayerCompression) + canSubstitute := ic.canSubstituteBlobs && canChangeLayerCompression + + var requiredCompression *compressiontypes.Algorithm + if ic.requireCompressionFormatMatch { + requiredCompression = ic.compressionFormat + } + + var tocDigest digest.Digest + + // Check if we have a chunked layer in storage that's based on that blob. These layers are stored by their TOC digest.
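+ // (The TOC digest comes from the layer's zstd:chunked annotations; layers without those annotations leave tocDigest unset.)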
+ d, err := chunkedToc.GetTOCDigest(srcInfo.Annotations) + if err != nil { + return types.BlobInfo{}, "", err + } + if d != nil { + tocDigest = *d + } + + reused, reusedBlob, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{ + Cache: ic.c.blobInfoCache, + CanSubstitute: canSubstitute, + EmptyLayer: emptyLayer, + LayerIndex: &layerIndex, + SrcRef: srcRef, + PossibleManifestFormats: append([]string{ic.manifestConversionPlan.preferredMIMEType}, ic.manifestConversionPlan.otherMIMETypeCandidates...), + RequiredCompression: requiredCompression, + OriginalCompression: srcInfo.CompressionAlgorithm, + TOCDigest: tocDigest, + }) + if err != nil { + return types.BlobInfo{}, "", fmt.Errorf("trying to reuse blob %s at destination: %w", srcInfo.Digest, err) + } + if reused { + logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest) + if err := func() error { // A scope for defer + label := "skipped: already exists" + if reusedBlob.MatchedByTOCDigest { + label = "skipped: already exists (found by TOC)" + } + bar, err := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", label) + if err != nil { + return err + } + defer bar.Abort(false) + bar.mark100PercentComplete() + return nil + }(); err != nil { + return types.BlobInfo{}, "", err + } + + // Throw an event that the layer has been skipped + if ic.c.options.Progress != nil && ic.c.options.ProgressInterval > 0 { + ic.c.options.Progress <- types.ProgressProperties{ + Event: types.ProgressEventSkipped, + Artifact: srcInfo, + } + } + + return updatedBlobInfoFromReuse(srcInfo, reusedBlob), cachedDiffID, nil + } + } + + // A partial pull is managed by the destination storage, that decides what portions + // of the source file are not known yet and must be fetched. + // Attempt a partial only when the source allows to retrieve a blob partially and + // the destination has support for it. + if canAvoidProcessingCompleteLayer && ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() { + reused, blobInfo, err := func() (bool, types.BlobInfo, error) { // A scope for defer + bar, err := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done") + if err != nil { + return false, types.BlobInfo{}, err + } + hideProgressBar := true + defer func() { // Note that this is not the same as defer bar.Abort(hideProgressBar); we need hideProgressBar to be evaluated lazily. + bar.Abort(hideProgressBar) + }() + + proxy := blobChunkAccessorProxy{ + wrapped: ic.c.rawSource, + bar: bar, + } + uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, private.PutBlobPartialOptions{ + Cache: ic.c.blobInfoCache, + LayerIndex: layerIndex, + }) + if err == nil { + if srcInfo.Size != -1 { + refill := srcInfo.Size - bar.Current() + bar.SetCurrent(srcInfo.Size) + bar.SetRefill(refill) + } + bar.mark100PercentComplete() + hideProgressBar = false + logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest) + return true, updatedBlobInfoFromUpload(srcInfo, uploadedBlob), nil + } + // On a "partial content not available" error, ignore it and retrieve the whole layer. 
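+ // (The transport signals this case by returning an error matching private.ErrFallbackToOrdinaryLayerDownload, checked just below.)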
+ var perr private.ErrFallbackToOrdinaryLayerDownload + if errors.As(err, &perr) { + logrus.Debugf("Failed to retrieve partial blob: %v", err) + return false, types.BlobInfo{}, nil + } + return false, types.BlobInfo{}, err + }() + if err != nil { + return types.BlobInfo{}, "", fmt.Errorf("partial pull of blob %s: %w", srcInfo.Digest, err) + } + if reused { + return blobInfo, cachedDiffID, nil + } + } + + // Fallback: copy the layer, computing the diffID if we need to do so + return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer + bar, err := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done") + if err != nil { + return types.BlobInfo{}, "", err + } + defer bar.Abort(false) + + srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache) + if err != nil { + return types.BlobInfo{}, "", fmt.Errorf("reading blob %s: %w", srcInfo.Digest, err) + } + defer srcStream.Close() + + blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer) + if err != nil { + return types.BlobInfo{}, "", err + } + + diffID := cachedDiffID + if diffIDIsNeeded { + select { + case <-ctx.Done(): + return types.BlobInfo{}, "", ctx.Err() + case diffIDResult := <-diffIDChan: + if diffIDResult.err != nil { + return types.BlobInfo{}, "", fmt.Errorf("computing layer DiffID: %w", diffIDResult.err) + } + logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest) + // Don’t record any associations that involve encrypted data. This is a bit crude, + // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes) + // might be safe, but it’s not trivially obvious, so let’s be conservative for now. + // This crude approach also means we don’t need to record whether a blob is encrypted + // in the blob info cache (which would probably be necessary for any more complex logic), + // and the simplicity is attractive. + if !encryptingOrDecrypting { + // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process + // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader. + ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest) + } + diffID = diffIDResult.digest + } + } + + bar.mark100PercentComplete() + return blobInfo, diffID, nil + }() +} + +// updatedBlobInfoFromReuse returns inputInfo updated with reusedBlob which was created based on inputInfo. +func updatedBlobInfoFromReuse(inputInfo types.BlobInfo, reusedBlob private.ReusedBlob) types.BlobInfo { + // The transport is only tasked with finding the blob, determining its size if necessary, and returning the right + // compression format if the blob was substituted. + // Handling of compression, encryption, and the related MIME types and the like are all the responsibility + // of the generic code in this package. + res := types.BlobInfo{ + Digest: reusedBlob.Digest, + Size: reusedBlob.Size, + URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior. + // FIXME: This should remove zstd:chunked annotations IF the original was chunked and the new one isn’t + // (but those annotations being left with incorrect values should not break pulls). 
+ Annotations: maps.Clone(inputInfo.Annotations), + MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation. + CompressionOperation: reusedBlob.CompressionOperation, + CompressionAlgorithm: reusedBlob.CompressionAlgorithm, + CryptoOperation: inputInfo.CryptoOperation, // Expected to be unset anyway. + } + // The transport is only expected to fill CompressionOperation and CompressionAlgorithm + // if the blob was substituted; otherwise, it is optional, and if not set, fill it in based + // on what we know from the srcInfos we were given. + if reusedBlob.Digest == inputInfo.Digest { + if res.CompressionOperation == types.PreserveOriginal { + res.CompressionOperation = inputInfo.CompressionOperation + } + if res.CompressionAlgorithm == nil { + res.CompressionAlgorithm = inputInfo.CompressionAlgorithm + } + } + if len(reusedBlob.CompressionAnnotations) != 0 { + if res.Annotations == nil { + res.Annotations = map[string]string{} + } + maps.Copy(res.Annotations, reusedBlob.CompressionAnnotations) + } + return res +} + +// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope. +// It copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest, +// perhaps (de/re/)compressing the stream, +// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller. +func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, + diffIDIsNeeded bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) { + var getDiffIDRecorder func(compressiontypes.DecompressorFunc) io.Writer // = nil + var diffIDChan chan diffIDResult + + err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithError below + if diffIDIsNeeded { + diffIDChan = make(chan diffIDResult, 1) // Buffered, so that sending a value after this or our caller has failed and exited does not block. + pipeReader, pipeWriter := io.Pipe() + defer func() { // Note that this is not the same as {defer pipeWriter.CloseWithError(err)}; we need err to be evaluated lazily. + _ = pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil + }() + + getDiffIDRecorder = func(decompressor compressiontypes.DecompressorFunc) io.Writer { + // If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further + // reading from the pipe has failed, we don’t really care. + // We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it, + // the return value includes an error indication, which we do check. + // + // If this never gets called, pipeReader will not be used anywhere, but pipeWriter will only be + // closed above, so we are happy enough with both pipeReader and pipeWriter to just get collected by GC.
+ go diffIDComputationGoroutine(diffIDChan, pipeReader, decompressor) // Closes pipeReader + return pipeWriter + } + } + + blobInfo, err := ic.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success + return blobInfo, diffIDChan, err + // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan +} + +// diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends its digest, and status, if any, to dest. +func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compressiontypes.DecompressorFunc) { + result := diffIDResult{ + digest: "", + err: errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"), + } + defer func() { dest <- result }() + defer layerStream.Close() // We do not care to bother the other end of the pipe with other failures; we send them to dest instead. + + result.digest, result.err = computeDiffID(layerStream, decompressor) +} + +// computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest. +func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorFunc) (digest.Digest, error) { + if decompressor != nil { + s, err := decompressor(stream) + if err != nil { + return "", err + } + defer s.Close() + stream = s + } + + return digest.Canonical.FromReader(stream) +} + +// algorithmsByNames returns slice of Algorithms from a sequence of Algorithm Names +func algorithmsByNames(names iter.Seq[string]) ([]compressiontypes.Algorithm, error) { + result := []compressiontypes.Algorithm{} + for name := range names { + algo, err := compression.AlgorithmByName(name) + if err != nil { + return nil, err + } + result = append(result, algo) + } + return result, nil +} diff --git a/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go new file mode 100644 index 0000000000..69c1e0727e --- /dev/null +++ b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go @@ -0,0 +1,57 @@ +package explicitfilepath + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/containers/storage/pkg/fileutils" +) + +// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path. +// To do so, all elements of the input path must exist; as a special case, the final component may be +// a non-existent name (but not a symlink pointing to a non-existent name) +// This is intended as a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc. +func ResolvePathToFullyExplicit(path string) (string, error) { + switch err := fileutils.Lexists(path); { + case err == nil: + return resolveExistingPathToFullyExplicit(path) + case os.IsNotExist(err): + parent, file := filepath.Split(path) + resolvedParent, err := resolveExistingPathToFullyExplicit(parent) + if err != nil { + return "", err + } + if file == "." || file == ".." { + // Coverage: This can happen, but very rarely: if we have successfully resolved the parent, both "." and ".." in it should have been resolved as well. + // This can still happen if there is a filesystem race condition, causing the Lstat() above to fail but the later resolution to succeed. 
+ // We do not care to promise anything if such filesystem race conditions can happen, but we definitely don't want to return "."/".." components + // in the resulting path, and especially not at the end. + return "", fmt.Errorf("Unexpectedly missing special filename component in %s", path) + } + resolvedPath := filepath.Join(resolvedParent, file) + // As a sanity check, ensure that there are no "." or ".." components. + cleanedResolvedPath := filepath.Clean(resolvedPath) + if cleanedResolvedPath != resolvedPath { + // Coverage: This should never happen. + return "", fmt.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath) + } + return resolvedPath, nil + default: // err != nil, unrecognized + return "", err + } +} + +// resolveExistingPathToFullyExplicit is the same as ResolvePathToFullyExplicit, +// but without the special case for missing final component. +func resolveExistingPathToFullyExplicit(path string) (string, error) { + resolved, err := filepath.Abs(path) + if err != nil { + return "", err // Coverage: This can fail only if os.Getwd() fails. + } + resolved, err = filepath.EvalSymlinks(resolved) + if err != nil { + return "", err + } + return filepath.Clean(resolved), nil +} diff --git a/vendor/github.com/containers/image/v5/docker/body_reader.go b/vendor/github.com/containers/image/v5/docker/body_reader.go index e69e21ef7a..3c612f2688 100644 --- a/vendor/github.com/containers/image/v5/docker/body_reader.go +++ b/vendor/github.com/containers/image/v5/docker/body_reader.go @@ -35,9 +35,9 @@ type bodyReader struct { body io.ReadCloser // The currently open connection we use to read data, or nil if there is nothing to read from / close. lastRetryOffset int64 // -1 if N/A - lastRetryTime time.Time // time.Time{} if N/A + lastRetryTime time.Time // IsZero() if N/A offset int64 // Current offset within the blob - lastSuccessTime time.Time // time.Time{} if N/A + lastSuccessTime time.Time // IsZero() if N/A } // newBodyReader creates a bodyReader for request path in c. @@ -207,9 +207,9 @@ func (br *bodyReader) Read(p []byte) (int, error) { } // millisecondsSinceOptional is like currentTime.Sub(tm).Milliseconds, but it returns a floating-point value. -// If tm is time.Time{}, it returns math.NaN() +// If tm.IsZero(), it returns math.NaN() func millisecondsSinceOptional(currentTime time.Time, tm time.Time) float64 { - if tm == (time.Time{}) { + if tm.IsZero() { return math.NaN() } return float64(currentTime.Sub(tm).Nanoseconds()) / 1_000_000.0 @@ -229,7 +229,7 @@ func (br *bodyReader) errorIfNotReconnecting(originalErr error, redactedURL stri logrus.Infof("Reading blob body from %s failed (%v), reconnecting after %d bytes…", redactedURL, originalErr, progress) return nil } - if br.lastRetryTime == (time.Time{}) { + if br.lastRetryTime.IsZero() { logrus.Infof("Reading blob body from %s failed (%v), reconnecting (first reconnection)…", redactedURL, originalErr) return nil } diff --git a/vendor/github.com/containers/image/v5/docker/distribution_error.go b/vendor/github.com/containers/image/v5/docker/distribution_error.go index 622d21fb1c..06a9593dcd 100644 --- a/vendor/github.com/containers/image/v5/docker/distribution_error.go +++ b/vendor/github.com/containers/image/v5/docker/distribution_error.go @@ -30,14 +30,25 @@ import ( // errcode.Errors slice. 
var errNoErrorsInBody = errors.New("no error details found in HTTP response body") -// unexpectedHTTPStatusError is returned when an unexpected HTTP status is +// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is // returned when making a registry api call. -type unexpectedHTTPStatusError struct { - Status string +type UnexpectedHTTPStatusError struct { + // StatusCode code as returned from the server, so callers can + // match the exact code to make certain decisions if needed. + StatusCode int + // status text as displayed in the error message, not exposed as callers should match the number. + status string +} + +func (e UnexpectedHTTPStatusError) Error() string { + return fmt.Sprintf("received unexpected HTTP status: %s", e.status) } -func (e *unexpectedHTTPStatusError) Error() string { - return fmt.Sprintf("received unexpected HTTP status: %s", e.Status) +func newUnexpectedHTTPStatusError(resp *http.Response) UnexpectedHTTPStatusError { + return UnexpectedHTTPStatusError{ + StatusCode: resp.StatusCode, + status: resp.Status, + } } // unexpectedHTTPResponseError is returned when an expected HTTP status code @@ -117,7 +128,7 @@ func handleErrorResponse(resp *http.Response) error { case resp.StatusCode == http.StatusUnauthorized: // Check for OAuth errors within the `WWW-Authenticate` header first // See https://tools.ietf.org/html/rfc6750#section-3 - for _, c := range parseAuthHeader(resp.Header) { + for c := range iterateAuthHeader(resp.Header) { if c.Scheme == "bearer" { var err errcode.Error // codes defined at https://tools.ietf.org/html/rfc6750#section-3.1 @@ -146,5 +157,5 @@ func handleErrorResponse(resp *http.Response) error { } return err } - return &unexpectedHTTPStatusError{Status: resp.Status} + return newUnexpectedHTTPStatusError(resp) } diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go index 220afd0091..851d3e082d 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -11,6 +11,7 @@ import ( "net/url" "os" "path/filepath" + "slices" "strconv" "strings" "sync" @@ -475,12 +476,11 @@ func (c *dockerClient) resolveRequestURL(path string) (*url.URL, error) { } // Checks if the auth headers in the response contain an indication of a failed -// authorizdation because of an "insufficient_scope" error. If that's the case, +// authorization because of an "insufficient_scope" error. If that's the case, // returns the required scope to be used for fetching a new token. 
func needsRetryWithUpdatedScope(res *http.Response) (bool, *authScope) { if res.StatusCode == http.StatusUnauthorized { - challenges := parseAuthHeader(res.Header) - for _, challenge := range challenges { + for challenge := range iterateAuthHeader(res.Header) { if challenge.Scheme == "bearer" { if errmsg, ok := challenge.Parameters["error"]; ok && errmsg == "insufficient_scope" { if scope, ok := challenge.Parameters["scope"]; ok && scope != "" { @@ -907,6 +907,10 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error { } tr := tlsclientconfig.NewTransport() tr.TLSClientConfig = c.tlsClientConfig + // If DockerProxyURL is set explicitly, use it instead of the system proxy + if c.sys != nil && c.sys.DockerProxyURL != nil { + tr.Proxy = http.ProxyURL(c.sys.DockerProxyURL) + } c.client = &http.Client{Transport: tr} ping := func(scheme string) error { @@ -924,7 +928,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error { if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { return registryHTTPResponseToError(resp) } - c.challenges = parseAuthHeader(resp.Header) + c.challenges = slices.Collect(iterateAuthHeader(resp.Header)) c.scheme = scheme c.supportsSignatures = resp.Header.Get("X-Registry-Supports-Signatures") == "1" return nil @@ -992,13 +996,18 @@ func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.R continue } if resp.StatusCode != http.StatusOK { - err := fmt.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode)) + err := fmt.Errorf("error fetching external blob from %q: %w", u, newUnexpectedHTTPStatusError(resp)) remoteErrors = append(remoteErrors, err) logrus.Debug(err) resp.Body.Close() continue } - return resp.Body, getBlobSize(resp), nil + + size, err := getBlobSize(resp) + if err != nil { + size = -1 + } + return resp.Body, size, nil } if remoteErrors == nil { return nil, 0, nil // fallback to non-external blob @@ -1006,12 +1015,20 @@ func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.R return nil, 0, fmt.Errorf("failed fetching external blob from all urls: %w", multierr.Format("", ", ", "", remoteErrors)) } -func getBlobSize(resp *http.Response) int64 { - size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) - if err != nil { - size = -1 +func getBlobSize(resp *http.Response) (int64, error) { + hdrs := resp.Header.Values("Content-Length") + if len(hdrs) == 0 { + return -1, errors.New(`Missing "Content-Length" header in response`) } - return size + hdr := hdrs[0] // Equivalent to resp.Header.Get(…) + size, err := strconv.ParseInt(hdr, 10, 64) + if err != nil { // Go’s response reader should already reject such values. + return -1, err + } + if size < 0 { // '-' is not a valid character in Content-Length, so negative values are invalid. Go’s response reader should already reject such values. + return -1, fmt.Errorf(`Invalid negative "Content-Length" %q`, hdr) + } + return size, nil } // getBlob returns a stream for the specified blob in ref, and the blob’s size (or -1 if unknown).
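Because unexpectedHTTPStatusError is now exported as UnexpectedHTTPStatusError with a public StatusCode field, callers can branch on the exact status instead of string-matching the error message. A minimal consumer-side sketch, assuming only the exported type introduced above; handlePullError and the chosen statuses are illustrative, not part of the library:

```go
package main

import (
	"errors"
	"fmt"
	"net/http"

	"github.com/containers/image/v5/docker"
)

// handlePullError (hypothetical) reacts to registry errors; it relies only on
// the exported UnexpectedHTTPStatusError and its StatusCode field.
func handlePullError(err error) {
	var statusErr docker.UnexpectedHTTPStatusError
	if errors.As(err, &statusErr) {
		switch statusErr.StatusCode {
		case http.StatusTooManyRequests:
			fmt.Println("registry is throttling; retry with backoff")
		case http.StatusServiceUnavailable:
			fmt.Println("registry temporarily unavailable; retry later")
		default:
			fmt.Printf("unexpected HTTP status %d\n", statusErr.StatusCode)
		}
		return
	}
	fmt.Printf("other error: %v\n", err)
}

func main() {
	// Simulate a wrapped error like the ones produced with %w in this diff.
	err := fmt.Errorf("error fetching external blob: %w",
		docker.UnexpectedHTTPStatusError{StatusCode: http.StatusTooManyRequests})
	handlePullError(err)
}
```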
@@ -1042,7 +1059,10 @@ func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info ty return nil, 0, fmt.Errorf("fetching blob: %w", err) } cache.RecordKnownLocation(ref.Transport(), bicTransportScope(ref), info.Digest, newBICLocationReference(ref)) - blobSize := getBlobSize(res) + blobSize, err := getBlobSize(res) + if err != nil { + blobSize = -1 + } reconnectingReader, err := newBodyReader(ctx, c, path, res.Body) if err != nil { diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index 3ac43cf9fc..b08ebc6fec 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -243,8 +243,12 @@ func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference. defer res.Body.Close() switch res.StatusCode { case http.StatusOK: + size, err := getBlobSize(res) + if err != nil { + return false, -1, fmt.Errorf("determining size of blob %s in %s: %w", digest, repo.Name(), err) + } logrus.Debugf("... already exists") - return true, getBlobSize(res), nil + return true, size, nil case http.StatusUnauthorized: logrus.Debugf("... not authorized") return false, -1, fmt.Errorf("checking whether a blob %s exists in %s: %w", digest, repo.Name(), registryHTTPResponseToError(res)) diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go index 41ab9bfd16..4eb9cdfba5 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go @@ -569,7 +569,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, sigURL *url.URL logrus.Debugf("... 
got status 404, as expected = end of signatures") return nil, true, nil } else if res.StatusCode != http.StatusOK { - return nil, false, fmt.Errorf("reading signature from %s: status %d (%s)", sigURL.Redacted(), res.StatusCode, http.StatusText(res.StatusCode)) + return nil, false, fmt.Errorf("reading signature from %s: %w", sigURL.Redacted(), newUnexpectedHTTPStatusError(res)) } contentType := res.Header.Get("Content-Type") diff --git a/vendor/github.com/containers/image/v5/docker/errors.go b/vendor/github.com/containers/image/v5/docker/errors.go index e749b50148..1ed40b87f7 100644 --- a/vendor/github.com/containers/image/v5/docker/errors.go +++ b/vendor/github.com/containers/image/v5/docker/errors.go @@ -40,10 +40,10 @@ func httpResponseToError(res *http.Response, context string) error { err := registryHTTPResponseToError(res) return ErrUnauthorizedForCredentials{Err: err} default: - if context != "" { - context += ": " + if context == "" { + return newUnexpectedHTTPStatusError(res) } - return fmt.Errorf("%sinvalid status code from registry %d (%s)", context, res.StatusCode, http.StatusText(res.StatusCode)) + return fmt.Errorf("%s: %w", context, newUnexpectedHTTPStatusError(res)) } } diff --git a/vendor/github.com/containers/image/v5/docker/paths_common.go b/vendor/github.com/containers/image/v5/docker/paths_common.go index 862e880397..d9993630bc 100644 --- a/vendor/github.com/containers/image/v5/docker/paths_common.go +++ b/vendor/github.com/containers/image/v5/docker/paths_common.go @@ -1,5 +1,4 @@ //go:build !freebsd -// +build !freebsd package docker diff --git a/vendor/github.com/containers/image/v5/docker/paths_freebsd.go b/vendor/github.com/containers/image/v5/docker/paths_freebsd.go index 2bf27ac06c..8f0f2eee88 100644 --- a/vendor/github.com/containers/image/v5/docker/paths_freebsd.go +++ b/vendor/github.com/containers/image/v5/docker/paths_freebsd.go @@ -1,5 +1,4 @@ //go:build freebsd -// +build freebsd package docker diff --git a/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go b/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go index 6bcb835b9e..f5fed07b89 100644 --- a/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go +++ b/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go @@ -4,6 +4,7 @@ package docker import ( "fmt" + "iter" "net/http" "strings" ) @@ -60,15 +61,17 @@ func init() { } } -func parseAuthHeader(header http.Header) []challenge { - challenges := []challenge{} - for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { - v, p := parseValueAndParams(h) - if v != "" { - challenges = append(challenges, challenge{Scheme: v, Parameters: p}) +func iterateAuthHeader(header http.Header) iter.Seq[challenge] { + return func(yield func(challenge) bool) { + for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { + v, p := parseValueAndParams(h) + if v != "" { + if !yield(challenge{Scheme: v, Parameters: p}) { + return + } + } } } - return challenges } // parseAuthScope parses an authentication scope string of the form `$resource:$remote:$actions` diff --git a/vendor/github.com/containers/image/v5/image/docker_schema2.go b/vendor/github.com/containers/image/v5/image/docker_schema2.go new file mode 100644 index 0000000000..e5a3b89912 --- /dev/null +++ b/vendor/github.com/containers/image/v5/image/docker_schema2.go @@ -0,0 +1,14 @@ +package image + +import ( + "github.com/containers/image/v5/internal/image" +) + +// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL 
bytes) +// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is +// a non-zero embedded timestamp; we could zero that, but that would just waste storage space +// in registries, so let’s use the same values. +var GzippedEmptyLayer = image.GzippedEmptyLayer + +// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer +const GzippedEmptyLayerDigest = image.GzippedEmptyLayerDigest diff --git a/vendor/github.com/containers/image/v5/image/sourced.go b/vendor/github.com/containers/image/v5/image/sourced.go new file mode 100644 index 0000000000..2b7f6b144b --- /dev/null +++ b/vendor/github.com/containers/image/v5/image/sourced.go @@ -0,0 +1,37 @@ +// Package image consolidates knowledge about various container image formats +// (as opposed to image storage mechanisms, which are handled by types.ImageSource) +// and exposes all of them using a unified interface. +package image + +import ( + "context" + + "github.com/containers/image/v5/internal/image" + "github.com/containers/image/v5/types" +) + +// FromSource returns a types.ImageCloser implementation for the default instance of source. +// If source is a manifest list, .Manifest() still returns the manifest list, +// but other methods transparently return data from an appropriate image instance. +// +// The caller must call .Close() on the returned ImageCloser. +// +// FromSource “takes ownership” of the input ImageSource and will call src.Close() +// when the image is closed. (This does not prevent callers from using both the +// Image and ImageSource objects simultaneously, but it means that they only need to +// close the Image.) +// +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function. +func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) { + return image.FromSource(ctx, sys, src) +} + +// FromUnparsedImage returns a types.Image implementation for unparsed. +// If unparsed represents a manifest list, .Manifest() still returns the manifest list, +// but other methods transparently return data from an appropriate single image. +// +// The Image must not be used after the underlying ImageSource is Close()d. +func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (types.Image, error) { + return image.FromUnparsedImage(ctx, sys, unparsed) +} diff --git a/vendor/github.com/containers/image/v5/image/unparsed.go b/vendor/github.com/containers/image/v5/image/unparsed.go new file mode 100644 index 0000000000..f530824a92 --- /dev/null +++ b/vendor/github.com/containers/image/v5/image/unparsed.go @@ -0,0 +1,47 @@ +package image + +import ( + "github.com/containers/image/v5/internal/image" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/unparsedimage" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" +) + +// UnparsedImage implements types.UnparsedImage. +// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. +type UnparsedImage = image.UnparsedImage + +// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest).
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list). +// +// This implementation of [types.UnparsedImage] ensures that [types.UnparsedImage.Manifest] validates the image +// against instanceDigest if set, or, if not, a digest implied by src.Reference, if any. +// +// The UnparsedImage must not be used after the underlying ImageSource is Close()d. +func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage { + return image.UnparsedInstance(src, instanceDigest) +} + +// unparsedWithRef wraps a private.UnparsedImage, claiming another replacementRef +type unparsedWithRef struct { + private.UnparsedImage + ref types.ImageReference +} + +func (uwr *unparsedWithRef) Reference() types.ImageReference { + return uwr.ref +} + +// UnparsedInstanceWithReference returns a types.UnparsedImage for wrappedInstance which claims to be a replacementRef. +// This is useful for combining image data with other reference values, e.g. to check signatures on a locally-pulled image +// based on a remote-registry policy. +// +// For the purposes of digest validation in [types.UnparsedImage.Manifest], what matters is the +// reference originally used to create wrappedInstance, not replacementRef. +func UnparsedInstanceWithReference(wrappedInstance types.UnparsedImage, replacementRef types.ImageReference) types.UnparsedImage { + return &unparsedWithRef{ + UnparsedImage: unparsedimage.FromPublic(wrappedInstance), + ref: replacementRef, + } +} diff --git a/vendor/github.com/containers/image/v5/internal/image/unparsed.go b/vendor/github.com/containers/image/v5/internal/image/unparsed.go index 0f026501c2..1cffe4311b 100644 --- a/vendor/github.com/containers/image/v5/internal/image/unparsed.go +++ b/vendor/github.com/containers/image/v5/internal/image/unparsed.go @@ -30,6 +30,9 @@ type UnparsedImage struct { // UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest). // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list). // +// This implementation of [types.UnparsedImage] ensures that [types.UnparsedImage.Manifest] validates the image +// against instanceDigest if set, or, if not, a digest implied by src.Reference, if any. +// // The UnparsedImage must not be used after the underlying ImageSource is Close()d. // // This is publicly visible as c/image/image.UnparsedInstance. @@ -48,6 +51,9 @@ func (i *UnparsedImage) Reference() types.ImageReference { } // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. +// +// Users of UnparsedImage are promised that this validates the image +// against either i.instanceDigest if set, or against a digest included in i.src.Reference. 
func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) { if i.cachedManifest == nil { m, mt, err := i.src.GetManifest(ctx, i.instanceDigest) diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go new file mode 100644 index 0000000000..b2462a3bc1 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go @@ -0,0 +1,108 @@ +package imagedestination + +import ( + "context" + "io" + + "github.com/containers/image/v5/internal/imagedestination/stubs" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" +) + +// wrapped provides the private.ImageDestination operations +// for a destination that only implements types.ImageDestination +type wrapped struct { + stubs.IgnoresOriginalOCIConfig + stubs.NoPutBlobPartialInitialize + + types.ImageDestination +} + +// FromPublic(dest) returns an object that provides the private.ImageDestination API +// +// Eventually, we might want to expose this function, and methods of the returned object, +// as a public API (or rather, a variant that does not include the already-superseded +// methods of types.ImageDestination, and has added more future-proofing), and more strongly +// deprecate direct use of types.ImageDestination. +// +// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct +// with public methods, or perhaps a private interface), so that we can add methods +// without breaking any external implementers of a public interface. +func FromPublic(dest types.ImageDestination) private.ImageDestination { + if dest2, ok := dest.(private.ImageDestination); ok { + return dest2 + } + return &wrapped{ + NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(dest.Reference()), + + ImageDestination: dest, + } +} + +// PutBlobWithOptions writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far. +func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) { + res, err := w.PutBlob(ctx, stream, inputInfo, options.Cache, options.IsConfig) + if err != nil { + return private.UploadedBlob{}, err + } + return private.UploadedBlob{ + Digest: res.Digest, + Size: res.Size, + }, nil +} + +// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If the blob has been successfully reused, returns (true, info, nil). 
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { + if options.RequiredCompression != nil { + return false, private.ReusedBlob{}, nil + } + reused, blob, err := w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute) + if !reused || err != nil { + return reused, private.ReusedBlob{}, err + } + return true, private.ReusedBlob{ + Digest: blob.Digest, + Size: blob.Size, + CompressionOperation: blob.CompressionOperation, + CompressionAlgorithm: blob.CompressionAlgorithm, + // CompressionAnnotations could be set to blob.Annotations, but that may contain unrelated + // annotations, and we didn’t use the blob.Annotations field previously, so we’ll + // continue not using it. + }, nil +} + +// PutSignaturesWithFormat writes a set of signatures to the destination. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for +// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. +// MUST be called after PutManifest (signatures may reference manifest contents). +func (w *wrapped) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error { + simpleSigs := [][]byte{} + for _, sig := range signatures { + simpleSig, ok := sig.(signature.SimpleSigning) + if !ok { + return signature.UnsupportedFormatError(sig) + } + simpleSigs = append(simpleSigs, simpleSig.UntrustedSignature()) + } + return w.PutSignatures(ctx, simpleSigs, instanceDigest) +} + +// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before CommitWithOptions() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed) +func (w *wrapped) CommitWithOptions(ctx context.Context, options private.CommitOptions) error { + return w.Commit(ctx, options.UnparsedToplevel) +} diff --git a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go index 6a0f88d3a6..719deccbb2 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go @@ -213,12 +213,12 @@ type instanceCandidate struct { digest digest.Digest // Instance digest } -func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip bool) bool { +func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip types.OptionalBool) bool { switch { case ic.platformIndex != other.platformIndex: return ic.platformIndex < other.platformIndex case ic.isZstd != other.isZstd: - if !preferGzip { + if preferGzip != types.OptionalBoolTrue { return ic.isZstd } else { return !ic.isZstd @@ -232,10 +232,6 @@ func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip // chooseInstance is a private equivalent to ChooseInstanceByCompression, // shared by ChooseInstance and ChooseInstanceByCompression. 
func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) { - didPreferGzip := false - if preferGzip == types.OptionalBoolTrue { - didPreferGzip = true - } wantedPlatforms := platform.WantedPlatforms(ctx) var bestMatch *instanceCandidate bestMatch = nil @@ -251,7 +247,7 @@ func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzi } candidate.platformIndex = platformIndex } - if bestMatch == nil || candidate.isPreferredOver(bestMatch, didPreferGzip) { + if bestMatch == nil || candidate.isPreferredOver(bestMatch, preferGzip) { bestMatch = &candidate } } diff --git a/vendor/github.com/containers/image/v5/internal/private/private.go b/vendor/github.com/containers/image/v5/internal/private/private.go index afd425483d..d9466e9001 100644 --- a/vendor/github.com/containers/image/v5/internal/private/private.go +++ b/vendor/github.com/containers/image/v5/internal/private/private.go @@ -3,6 +3,7 @@ package private import ( "context" "io" + "time" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/blobinfocache" @@ -170,6 +171,12 @@ type CommitOptions struct { // What “resolved” means is transport-specific. // Transports which don’t support reporting resolved references can ignore the field; the generic copy code writes "nil" into the value. ReportResolvedReference *types.ImageReference + // Timestamp, if set, will force timestamps of content created in the destination to this value. + // Most transports don't support this. + // + // In oci-archive: destinations, this will set the create/mod/access timestamps in each tar entry + // (but not a timestamp of the created archive file). + Timestamp *time.Time } // ImageSourceChunk is a portion of a blob. diff --git a/vendor/github.com/containers/image/v5/internal/set/set.go b/vendor/github.com/containers/image/v5/internal/set/set.go index acf30343e0..7716b12d5b 100644 --- a/vendor/github.com/containers/image/v5/internal/set/set.go +++ b/vendor/github.com/containers/image/v5/internal/set/set.go @@ -1,6 +1,9 @@ package set -import "golang.org/x/exp/maps" +import ( + "iter" + "maps" +) // FIXME: // - Docstrings @@ -28,8 +31,8 @@ func (s *Set[E]) Add(v E) { s.m[v] = struct{}{} // Possibly writing the same struct{}{} presence marker again. } -func (s *Set[E]) AddSlice(slice []E) { - for _, v := range slice { +func (s *Set[E]) AddSeq(seq iter.Seq[E]) { + for v := range seq { s.Add(v) } } @@ -47,6 +50,6 @@ func (s *Set[E]) Empty() bool { return len(s.m) == 0 } -func (s *Set[E]) Values() []E { +func (s *Set[E]) All() iter.Seq[E] { return maps.Keys(s.m) } diff --git a/vendor/github.com/containers/image/v5/internal/signer/signer.go b/vendor/github.com/containers/image/v5/internal/signer/signer.go new file mode 100644 index 0000000000..5720254d1c --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/signer/signer.go @@ -0,0 +1,47 @@ +package signer + +import ( + "context" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/signature" +) + +// Signer is an object, possibly carrying state, that can be used by copy.Image to sign one or more container images. +// This type is visible to external callers, so it has no public fields or methods apart from Close(). +// +// The owner of a Signer must call Close() when done. 
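The set changes track Go 1.23's iterator support: the standard-library maps.Keys returns an iter.Seq rather than the slice that golang.org/x/exp/maps returned, so AddSlice/Values become AddSeq/All. A runnable sketch of the same pattern, with a simplified stand-in for the internal Set:

```go
package main

import (
	"fmt"
	"iter"
	"maps"
	"slices"
)

type Set[E comparable] struct{ m map[E]struct{} }

func New[E comparable]() *Set[E] { return &Set[E]{m: map[E]struct{}{}} }

func (s *Set[E]) Add(v E) { s.m[v] = struct{}{} }

// AddSeq accepts any iter.Seq, so callers can feed it maps.Keys, slices.Values, etc.
func (s *Set[E]) AddSeq(seq iter.Seq[E]) {
	for v := range seq {
		s.Add(v)
	}
}

// All returns a single-use iterator over the members, replacing the old Values() []E.
func (s *Set[E]) All() iter.Seq[E] { return maps.Keys(s.m) }

func main() {
	s := New[string]()
	s.AddSeq(slices.Values([]string{"a", "b", "a"}))
	for v := range s.All() {
		fmt.Println(v) // a, b (map iteration order)
	}
}
```

The payoff is that callers iterate without materializing an intermediate slice.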
+type Signer struct { + implementation SignerImplementation +} + +// NewSigner creates a public Signer from a SignerImplementation +func NewSigner(impl SignerImplementation) *Signer { + return &Signer{implementation: impl} +} + +func (s *Signer) Close() error { + return s.implementation.Close() +} + +// ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature. +// Alternatively, should SignImageManifest be provided a logging writer of some kind? +func ProgressMessage(signer *Signer) string { + return signer.implementation.ProgressMessage() +} + +// SignImageManifest invokes a SignerImplementation. +// This is a function, not a method, so that it can only be called by code that is allowed to import this internal subpackage. +func SignImageManifest(ctx context.Context, signer *Signer, manifest []byte, dockerReference reference.Named) (signature.Signature, error) { + return signer.implementation.SignImageManifest(ctx, manifest, dockerReference) +} + +// SignerImplementation is an object, possibly carrying state, that can be used by copy.Image to sign one or more container images. +// This interface is distinct from Signer so that implementations can be created outside of this package. +type SignerImplementation interface { + // ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature. + ProgressMessage() string + // SignImageManifest creates a new signature for manifest m as dockerReference. + SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (signature.Signature, error) + Close() error +} diff --git a/vendor/github.com/containers/image/v5/internal/unparsedimage/wrapper.go b/vendor/github.com/containers/image/v5/internal/unparsedimage/wrapper.go new file mode 100644 index 0000000000..fe65b1a982 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/unparsedimage/wrapper.go @@ -0,0 +1,38 @@ +package unparsedimage + +import ( + "context" + + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/types" +) + +// wrapped provides the private.UnparsedImage operations +// for an object that only implements types.UnparsedImage +type wrapped struct { + types.UnparsedImage +} + +// FromPublic(unparsed) returns an object that provides the private.UnparsedImage API +func FromPublic(unparsed types.UnparsedImage) private.UnparsedImage { + if unparsed2, ok := unparsed.(private.UnparsedImage); ok { + return unparsed2 + } + return &wrapped{ + UnparsedImage: unparsed, + } +} + +// UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need. 
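Since Signer deliberately exposes nothing but Close(), and signing is invoked through package-level functions, an implementation only needs to satisfy SignerImplementation. A hedged sketch with stand-in types (Signature here replaces the internal signature.Signature, and the docker reference is reduced to a string; the real interface uses reference.Named):

```go
package main

import (
	"context"
	"fmt"
)

// Signature stands in for the internal signature.Signature type.
type Signature []byte

// SignerImplementation mirrors the shape of the interface above, simplified for this sketch.
type SignerImplementation interface {
	ProgressMessage() string
	SignImageManifest(ctx context.Context, m []byte, dockerReference string) (Signature, error)
	Close() error
}

// staticSigner is a toy implementation; a real one would hold key material.
type staticSigner struct{ keyID string }

func (s *staticSigner) ProgressMessage() string { return "Signing image using key " + s.keyID }

func (s *staticSigner) SignImageManifest(ctx context.Context, m []byte, dockerReference string) (Signature, error) {
	// A real implementation would produce a simple-signing or sigstore payload here.
	return Signature(fmt.Sprintf("%s signed %d manifest bytes for %s", s.keyID, len(m), dockerReference)), nil
}

func (s *staticSigner) Close() error { return nil }

func main() {
	var impl SignerImplementation = &staticSigner{keyID: "test-key"}
	defer impl.Close()
	fmt.Println(impl.ProgressMessage())
	sig, _ := impl.SignImageManifest(context.Background(), []byte(`{"schemaVersion":2}`), "example.com/app:latest")
	fmt.Println(string(sig))
}
```

Keeping SignerImplementation separate from Signer is what lets implementations live outside the internal package while the public surface stays frozen.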
+func (w *wrapped) UntrustedSignatures(ctx context.Context) ([]signature.Signature, error) { + sigs, err := w.Signatures(ctx) + if err != nil { + return nil, err + } + res := []signature.Signature{} + for _, sig := range sigs { + res = append(res, signature.SimpleSigningFromBlob(sig)) + } + return res, nil +} diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go index b74a1e240d..f4b1fc0337 100644 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go @@ -133,12 +133,12 @@ func (m *Schema1) ConfigInfo() types.BlobInfo { // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. func (m *Schema1) LayerInfos() []LayerInfo { - layers := make([]LayerInfo, len(m.FSLayers)) - for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway) - layers[(len(m.FSLayers)-1)-i] = LayerInfo{ + layers := make([]LayerInfo, 0, len(m.FSLayers)) + for i, layer := range slices.Backward(m.FSLayers) { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway) + layers = append(layers, LayerInfo{ BlobInfo: types.BlobInfo{Digest: layer.BlobSum, Size: -1}, EmptyLayer: m.ExtractedV1Compatibility[i].ThrowAway, - } + }) } return layers } @@ -284,7 +284,7 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) { } // Build the history. convertedHistory := []Schema2History{} - for _, compat := range m.ExtractedV1Compatibility { + for _, compat := range slices.Backward(m.ExtractedV1Compatibility) { hitem := Schema2History{ Created: compat.Created, CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "), @@ -292,7 +292,7 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) { Comment: compat.Comment, EmptyLayer: compat.ThrowAway, } - convertedHistory = append([]Schema2History{hitem}, convertedHistory...) + convertedHistory = append(convertedHistory, hitem) } // Build the rootfs information. We need the decompressed sums that we've been // calculating to fill in the DiffIDs. It's expected (but not enforced by us) diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go index 0faa866b7f..a18425d0e5 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci.go +++ b/vendor/github.com/containers/image/v5/manifest/oci.go @@ -166,10 +166,11 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { // getEncryptedMediaType will return the mediatype to its encrypted counterpart and return // an error if the mediatype does not support encryption func getEncryptedMediaType(mediatype string) (string, error) { - if slices.Contains(strings.Split(mediatype, "+")[1:], "encrypted") { + parts := strings.Split(mediatype, "+") + if slices.Contains(parts[1:], "encrypted") { return "", fmt.Errorf("unsupported mediaType: %q already encrypted", mediatype) } - unsuffixedMediatype := strings.Split(mediatype, "+")[0] + unsuffixedMediatype := parts[0] switch unsuffixedMediatype { case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. 
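Both docker_schema1.go hunks replace quadratic reversal idioms — filling a slice by computed reverse index, and prepending with append([]Schema2History{hitem}, convertedHistory...) — with one backward pass and plain appends. A sketch of the slices.Backward pattern (Go 1.23):

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	layers := []string{"base", "middle", "top"}

	// Old: reversed = append([]string{v}, reversed...) per element — O(n²) copying.
	// New: iterate backward once and append — amortized O(1) per element.
	reversed := make([]string, 0, len(layers))
	for i, v := range slices.Backward(layers) {
		fmt.Printf("original index %d: %s\n", i, v) // i is the index in the source slice
		reversed = append(reversed, v)
	}
	fmt.Println(reversed) // [top middle base]
}
```

Note that the index still refers to the original position, which is why the LayerInfos hunk can keep indexing m.ExtractedV1Compatibility[i] inside the backward loop.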
diff --git a/vendor/github.com/containers/image/v5/oci/internal/oci_util.go b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go
new file mode 100644
index 0000000000..c4eaed0eee
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go
@@ -0,0 +1,150 @@
+package internal
+
+import (
+	"errors"
+	"fmt"
+	"path/filepath"
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+)
+
+// annotation spec from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys
+const (
+	separator = `(?:[-._:@+]|--)`
+	alphanum  = `(?:[A-Za-z0-9]+)`
+	component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)`
+)
+
+var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`)
+var windowsRefRegexp = regexp.MustCompile(`^([a-zA-Z]:\\.+?):(.*)$`)
+
+// ValidateImageName returns nil if the image name is empty or matches the open-containers image name specs.
+// In any other case an error is returned.
+func ValidateImageName(image string) error {
+	if len(image) == 0 {
+		return nil
+	}
+
+	var err error
+	if !refRegexp.MatchString(image) {
+		err = fmt.Errorf("Invalid image %s", image)
+	}
+	return err
+}
+
+// SplitPathAndImage tries to split the provided OCI reference into the OCI path and image.
+// Neither path nor image parts are validated at this stage.
+func SplitPathAndImage(reference string) (string, string) {
+	if runtime.GOOS == "windows" {
+		return splitPathAndImageWindows(reference)
+	}
+	return splitPathAndImageNonWindows(reference)
+}
+
+func splitPathAndImageWindows(reference string) (string, string) {
+	groups := windowsRefRegexp.FindStringSubmatch(reference)
+	// nil group means no match
+	if groups == nil {
+		return reference, ""
+	}
+
+	// we expect three elements: the full match, the capture group for the path,
+	// and the capture group for the image
+	if len(groups) != 3 {
+		return reference, ""
+	}
+	return groups[1], groups[2]
+}
+
+func splitPathAndImageNonWindows(reference string) (string, string) {
+	path, image, _ := strings.Cut(reference, ":") // image is set to "" if there is no ":"
+	return path, image
+}
+
+// ValidateOCIPath takes the OCI path and validates it.
+func ValidateOCIPath(path string) error {
+	if runtime.GOOS == "windows" {
+		// On Windows we must allow for a ':' as part of the path
+		if strings.Count(path, ":") > 1 {
+			return fmt.Errorf("Invalid OCI reference: path %s contains more than one colon", path)
+		}
+	} else {
+		if strings.Contains(path, ":") {
+			return fmt.Errorf("Invalid OCI reference: path %s contains a colon", path)
+		}
+	}
+	return nil
+}
+
+// ValidateScope validates a policy configuration scope for an OCI transport.
+func ValidateScope(scope string) error {
+	var err error
+	if runtime.GOOS == "windows" {
+		err = validateScopeWindows(scope)
+	} else {
+		err = validateScopeNonWindows(scope)
+	}
+	if err != nil {
+		return err
+	}
+
+	cleaned := filepath.Clean(scope)
+	if cleaned != scope {
+		return fmt.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
+	}
+
+	return nil
+}
+
+func validateScopeWindows(scope string) error {
+	matched, _ := regexp.MatchString(`^[a-zA-Z]:\\`, scope)
+	if !matched {
+		return fmt.Errorf("Invalid scope '%s'. 
Must be an absolute path", scope) + } + + return nil +} + +func validateScopeNonWindows(scope string) error { + if !strings.HasPrefix(scope, "/") { + return fmt.Errorf("Invalid scope %s: must be an absolute path", scope) + } + + // Refuse also "/", otherwise "/" and "" would have the same semantics, + // and "" could be unexpectedly shadowed by the "/" entry. + if scope == "/" { + return errors.New(`Invalid scope "/": Use the generic default scope ""`) + } + + return nil +} + +// parseOCIReferenceName parses the image from the oci reference. +func parseOCIReferenceName(image string) (img string, index int, err error) { + index = -1 + if strings.HasPrefix(image, "@") { + idx, err := strconv.Atoi(image[1:]) + if err != nil { + return "", index, fmt.Errorf("Invalid source index @%s: not an integer: %w", image[1:], err) + } + if idx < 0 { + return "", index, fmt.Errorf("Invalid source index @%d: must not be negative", idx) + } + index = idx + } else { + img = image + } + return img, index, nil +} + +// ParseReferenceIntoElements splits the oci reference into location, image name and source index if exists +func ParseReferenceIntoElements(reference string) (string, string, int, error) { + dir, image := SplitPathAndImage(reference) + image, index, err := parseOCIReferenceName(image) + if err != nil { + return "", "", -1, err + } + return dir, image, index, nil +} diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go b/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go new file mode 100644 index 0000000000..7978229d39 --- /dev/null +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go @@ -0,0 +1,189 @@ +package layout + +import ( + "context" + "encoding/json" + "fmt" + "io/fs" + "os" + "slices" + + "github.com/containers/image/v5/internal/set" + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" +) + +// DeleteImage deletes the named image from the directory, if supported. +func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + sharedBlobsDir := "" + if sys != nil && sys.OCISharedBlobDirPath != "" { + sharedBlobsDir = sys.OCISharedBlobDirPath + } + + descriptor, descriptorIndex, err := ref.getManifestDescriptor() + if err != nil { + return err + } + + blobsUsedByImage := make(map[digest.Digest]int) + if err := ref.countBlobsForDescriptor(blobsUsedByImage, &descriptor, sharedBlobsDir); err != nil { + return err + } + + blobsToDelete, err := ref.getBlobsToDelete(blobsUsedByImage, sharedBlobsDir) + if err != nil { + return err + } + + err = ref.deleteBlobs(blobsToDelete) + if err != nil { + return err + } + + return ref.deleteReferenceFromIndex(descriptorIndex) +} + +// countBlobsForDescriptor updates dest with usage counts of blobs required for descriptor, INCLUDING descriptor itself. 
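Taken together, SplitPathAndImage and parseOCIReferenceName give three accepted reference shapes on non-Windows systems: "<dir>", "<dir>:<image>", and "<dir>:@<index>". A condensed stand-in (parse is hypothetical and skips the path/image validation the real code performs):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parse condenses the non-Windows path of ParseReferenceIntoElements:
// "<dir>", "<dir>:<image>", or "<dir>:@<index>".
func parse(ref string) (dir, image string, index int, err error) {
	dir, rest, _ := strings.Cut(ref, ":") // rest is "" when there is no ":"
	if after, ok := strings.CutPrefix(rest, "@"); ok {
		index, err = strconv.Atoi(after)
		if err != nil || index < 0 {
			return "", "", -1, fmt.Errorf("invalid source index @%s", after)
		}
		return dir, "", index, nil
	}
	return dir, rest, -1, nil
}

func main() {
	for _, ref := range []string{"/layouts/app", "/layouts/app:v1", "/layouts/app:@2"} {
		dir, image, index, err := parse(ref)
		fmt.Printf("%-18s -> dir=%q image=%q index=%d err=%v\n", ref, dir, image, index, err)
	}
}
```

The index form exists for sources only; destinations, as newImageDestination below enforces, reject @index references outright.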
+func (ref ociReference) countBlobsForDescriptor(dest map[digest.Digest]int, descriptor *imgspecv1.Descriptor, sharedBlobsDir string) error {
+	blobPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir)
+	if err != nil {
+		return err
+	}
+
+	dest[descriptor.Digest]++
+	switch descriptor.MediaType {
+	case imgspecv1.MediaTypeImageManifest:
+		manifest, err := parseJSON[imgspecv1.Manifest](blobPath)
+		if err != nil {
+			return err
+		}
+		dest[manifest.Config.Digest]++
+		for _, layer := range manifest.Layers {
+			dest[layer.Digest]++
+		}
+	case imgspecv1.MediaTypeImageIndex:
+		index, err := parseIndex(blobPath)
+		if err != nil {
+			return err
+		}
+		if err := ref.countBlobsReferencedByIndex(dest, index, sharedBlobsDir); err != nil {
+			return err
+		}
+	default:
+		return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType)
+	}
+	return nil
+}
+
+// countBlobsReferencedByIndex updates dest with usage counts of blobs required for index, EXCLUDING the index itself.
+func (ref ociReference) countBlobsReferencedByIndex(destination map[digest.Digest]int, index *imgspecv1.Index, sharedBlobsDir string) error {
+	for _, descriptor := range index.Manifests {
+		if err := ref.countBlobsForDescriptor(destination, &descriptor, sharedBlobsDir); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// getBlobsToDelete takes a map of digests to their usage counts in the manifest being deleted,
+// compares it with the digest usage in the root index, and returns the set of blobs that can be
+// safely deleted.
+func (ref ociReference) getBlobsToDelete(blobsUsedByDescriptorToDelete map[digest.Digest]int, sharedBlobsDir string) (*set.Set[digest.Digest], error) {
+	rootIndex, err := ref.getIndex()
+	if err != nil {
+		return nil, err
+	}
+	blobsUsedInRootIndex := make(map[digest.Digest]int)
+	err = ref.countBlobsReferencedByIndex(blobsUsedInRootIndex, rootIndex, sharedBlobsDir)
+	if err != nil {
+		return nil, err
+	}
+
+	blobsToDelete := set.New[digest.Digest]()
+
+	for digest, count := range blobsUsedInRootIndex {
+		if count-blobsUsedByDescriptorToDelete[digest] == 0 {
+			blobsToDelete.Add(digest)
+		}
+	}
+
+	return blobsToDelete, nil
+}
+
+// This transport never generates layouts where blobs for an image are both in the local blobs directory
+// and the shared one; it’s either one or the other, depending on how OCISharedBlobDirPath is set.
+//
+// But we can’t correctly compute use counts for OCISharedBlobDirPath (because we don't know what
+// the other layouts sharing that directory are, and we might not even have permission to read them),
+// so we can’t really delete any blobs in that case.
+// Checking the _local_ blobs directory, and deleting blobs from there, doesn't really hurt,
+// in case the layout was created using some other tool or without OCISharedBlobDirPath set, so let's silently
+// check for local blobs (but we should make no noise if the blobs are actually in the shared directory).
+// +// So, NOTE: the blobPath() call below hard-codes "" even in calls where OCISharedBlobDirPath is set +func (ref ociReference) deleteBlobs(blobsToDelete *set.Set[digest.Digest]) error { + for digest := range blobsToDelete.All() { + blobPath, err := ref.blobPath(digest, "") //Only delete in the local directory, see comment above + if err != nil { + return err + } + err = deleteBlob(blobPath) + if err != nil { + return err + } + } + + return nil +} + +func deleteBlob(blobPath string) error { + logrus.Debug(fmt.Sprintf("Deleting blob at %q", blobPath)) + + err := os.Remove(blobPath) + if err != nil && !os.IsNotExist(err) { + return err + } else { + return nil + } +} + +func (ref ociReference) deleteReferenceFromIndex(referenceIndex int) error { + index, err := ref.getIndex() + if err != nil { + return err + } + + index.Manifests = slices.Delete(index.Manifests, referenceIndex, referenceIndex+1) + + return saveJSON(ref.indexPath(), index) +} + +func saveJSON(path string, content any) (retErr error) { + // If the file already exists, get its mode to preserve it + var mode fs.FileMode + existingfi, err := os.Stat(path) + if err != nil { + if !os.IsNotExist(err) { + return err + } else { // File does not exist, use default mode + mode = 0644 + } + } else { + mode = existingfi.Mode() + } + + file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode) + if err != nil { + return err + } + // since we are writing to this file, make sure we handle errors + defer func() { + closeErr := file.Close() + if retErr == nil { + retErr = closeErr + } + }() + + return json.NewEncoder(file).Encode(content) +} diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go new file mode 100644 index 0000000000..492ede591b --- /dev/null +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go @@ -0,0 +1,414 @@ +package layout + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "runtime" + "slices" + + "github.com/containers/image/v5/internal/imagedestination/impl" + "github.com/containers/image/v5/internal/imagedestination/stubs" + "github.com/containers/image/v5/internal/manifest" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/putblobdigest" + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/fileutils" + digest "github.com/opencontainers/go-digest" + imgspec "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +type ociImageDestination struct { + impl.Compat + impl.PropertyMethodsInitialize + stubs.IgnoresOriginalOCIConfig + stubs.NoPutBlobPartialInitialize + stubs.NoSignaturesInitialize + + ref ociReference + index imgspecv1.Index + sharedBlobDir string +} + +// newImageDestination returns an ImageDestination for writing to an existing directory. 
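The deletion path is plain reference counting: tally every blob use under the descriptor being removed, tally every use reachable from the root index, and delete exactly those digests whose root-index count is fully accounted for by the removed image. The core comparison, in miniature:

```go
package main

import "fmt"

// blobsToDelete mirrors getBlobsToDelete: a blob is safe to remove only when every
// use recorded in the root index belongs to the image being deleted.
func blobsToDelete(usedInRootIndex, usedByDeletedImage map[string]int) []string {
	var out []string
	for d, count := range usedInRootIndex {
		if count-usedByDeletedImage[d] == 0 {
			out = append(out, d)
		}
	}
	return out
}

func main() {
	root := map[string]int{"sha256:aaa": 2, "sha256:bbb": 1}
	img := map[string]int{"sha256:aaa": 1, "sha256:bbb": 1}
	fmt.Println(blobsToDelete(root, img)) // [sha256:bbb]; sha256:aaa is still referenced elsewhere
}
```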
+func newImageDestination(sys *types.SystemContext, ref ociReference) (private.ImageDestination, error) { + if ref.sourceIndex != -1 { + return nil, fmt.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex) + } + var index *imgspecv1.Index + if indexExists(ref) { + var err error + index, err = ref.getIndex() + if err != nil { + return nil, err + } + } else { + index = &imgspecv1.Index{ + Versioned: imgspec.Versioned{ + SchemaVersion: 2, + }, + Annotations: make(map[string]string), + } + } + + desiredLayerCompression := types.Compress + if sys != nil && sys.OCIAcceptUncompressedLayers { + desiredLayerCompression = types.PreserveOriginal + } + + d := &ociImageDestination{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + SupportedManifestMIMETypes: []string{ + imgspecv1.MediaTypeImageManifest, + imgspecv1.MediaTypeImageIndex, + }, + DesiredLayerCompression: desiredLayerCompression, + AcceptsForeignLayerURLs: true, + MustMatchRuntimeOS: false, + IgnoresEmbeddedDockerReference: false, // N/A, DockerReference() returns nil. + HasThreadSafePutBlob: true, + }), + NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref), + NoSignaturesInitialize: stubs.NoSignatures("Pushing signatures for OCI images is not supported"), + + ref: ref, + index: *index, + } + d.Compat = impl.AddCompat(d) + if sys != nil { + d.sharedBlobDir = sys.OCISharedBlobDirPath + } + + if err := ensureDirectoryExists(d.ref.dir); err != nil { + return nil, err + } + // Per the OCI image specification, layouts MUST have a "blobs" subdirectory, + // but it MAY be empty (e.g. if we never end up calling PutBlob) + // https://github.com/opencontainers/image-spec/blame/7c889fafd04a893f5c5f50b7ab9963d5d64e5242/image-layout.md#L19 + if err := ensureDirectoryExists(filepath.Join(d.ref.dir, imgspecv1.ImageBlobsDir)); err != nil { + return nil, err + } + return d, nil +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. +func (d *ociImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any. +func (d *ociImageDestination) Close() error { + return nil +} + +// PutBlobWithOptions writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far. 
+func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (_ private.UploadedBlob, retErr error) { + blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob") + if err != nil { + return private.UploadedBlob{}, err + } + succeeded := false + explicitClosed := false + defer func() { + if !explicitClosed { + closeErr := blobFile.Close() + if retErr == nil { + retErr = closeErr + } + } + if !succeeded { + os.Remove(blobFile.Name()) + } + }() + + digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo) + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + size, err := io.Copy(blobFile, stream) + if err != nil { + return private.UploadedBlob{}, err + } + blobDigest := digester.Digest() + if inputInfo.Size != -1 && size != inputInfo.Size { + return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size) + } + + if err := d.blobFileSyncAndRename(blobFile, blobDigest, &explicitClosed); err != nil { + return private.UploadedBlob{}, err + } + succeeded = true + return private.UploadedBlob{Digest: blobDigest, Size: size}, nil +} + +// blobFileSyncAndRename syncs the specified blobFile on the filesystem and renames it to the +// specific blob path determined by the blobDigest. The closed pointer indicates to the caller +// whether blobFile has been closed or not. +func (d *ociImageDestination) blobFileSyncAndRename(blobFile *os.File, blobDigest digest.Digest, closed *bool) error { + if err := blobFile.Sync(); err != nil { + return err + } + + // On POSIX systems, blobFile was created with mode 0600, so we need to make it readable. + // On Windows, the “permissions of newly created files” argument to syscall.Open is + // ignored and the file is already readable; besides, blobFile.Chmod, i.e. syscall.Fchmod, + // always fails on Windows. + if runtime.GOOS != "windows" { + if err := blobFile.Chmod(0644); err != nil { + return err + } + } + + blobPath, err := d.ref.blobPath(blobDigest, d.sharedBlobDir) + if err != nil { + return err + } + if err := ensureParentDirectoryExists(blobPath); err != nil { + return err + } + + // need to explicitly close the file, since a rename won't otherwise work on Windows + err = blobFile.Close() + if err != nil { + return err + } + *closed = true + + if err := os.Rename(blobFile.Name(), blobPath); err != nil { + return err + } + + return nil +} + +// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If the blob has been successfully reused, returns (true, info, nil). +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. 
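PutBlobWithOptions and blobFileSyncAndRename implement the usual content-addressed write: stream into a temp file while digesting, fsync, then rename into blobs/<algorithm>/<encoded>. A compressed sketch using the real github.com/opencontainers/go-digest API (writeBlob is simplified; the vendored code additionally verifies the expected size, fixes permissions, and supports shared blob directories):

```go
package main

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"

	"github.com/opencontainers/go-digest"
)

// writeBlob sketches the temp-file → digest-while-copying → fsync → rename flow.
func writeBlob(dir string, stream io.Reader) (digest.Digest, int64, error) {
	tmp, err := os.CreateTemp(dir, "oci-put-blob")
	if err != nil {
		return "", -1, err
	}
	digester := digest.Canonical.Digester()
	// Tee the stream through the digester so we hash exactly the bytes we store.
	size, err := io.Copy(io.MultiWriter(tmp, digester.Hash()), stream)
	if err == nil {
		err = tmp.Sync() // make the bytes durable before they become addressable
	}
	if closeErr := tmp.Close(); err == nil { // close before rename, for Windows
		err = closeErr
	}
	if err != nil {
		os.Remove(tmp.Name())
		return "", -1, err
	}
	dgst := digester.Digest()
	dest := filepath.Join(dir, "blobs", dgst.Algorithm().String(), dgst.Encoded())
	if err := os.MkdirAll(filepath.Dir(dest), 0o755); err != nil {
		return "", -1, err
	}
	return dgst, size, os.Rename(tmp.Name(), dest)
}

func main() {
	dir, _ := os.MkdirTemp("", "layout")
	d, n, err := writeBlob(dir, strings.NewReader("hello blob"))
	fmt.Println(d, n, err)
}
```

The rename is what makes the blob appear atomically under its digest-derived name, so readers never observe a partially written file.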
+func (d *ociImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { + if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) { + return false, private.ReusedBlob{}, nil + } + if info.Digest == "" { + return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest") + } + blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir) + if err != nil { + return false, private.ReusedBlob{}, err + } + finfo, err := os.Stat(blobPath) + if err != nil && os.IsNotExist(err) { + return false, private.ReusedBlob{}, nil + } + if err != nil { + return false, private.ReusedBlob{}, err + } + + return true, private.ReusedBlob{Digest: info.Digest, Size: finfo.Size()}, nil +} + +// PutManifest writes a manifest to the destination. Per our list of supported manifest MIME types, +// this should be either an OCI manifest (possibly converted to this format by the caller) or index, +// neither of which we'll need to modify further. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to overwrite the manifest for (when +// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. +// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated +// by `manifest.Digest()`. +// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. +// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), +// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. +func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error { + var digest digest.Digest + var err error + if instanceDigest != nil { + digest = *instanceDigest + } else { + digest, err = manifest.Digest(m) + if err != nil { + return err + } + } + + blobPath, err := d.ref.blobPath(digest, d.sharedBlobDir) + if err != nil { + return err + } + if err := ensureParentDirectoryExists(blobPath); err != nil { + return err + } + if err := os.WriteFile(blobPath, m, 0644); err != nil { + return err + } + + if instanceDigest != nil { + return nil + } + + // If we had platform information, we'd build an imgspecv1.Platform structure here. + + // Start filling out the descriptor for this entry + desc := imgspecv1.Descriptor{} + desc.Digest = digest + desc.Size = int64(len(m)) + if d.ref.image != "" { + desc.Annotations = make(map[string]string) + desc.Annotations[imgspecv1.AnnotationRefName] = d.ref.image + } + + // If we knew the MIME type, we wouldn't have to guess here. + desc.MediaType = manifest.GuessMIMEType(m) + + d.addManifest(&desc) + + return nil +} + +func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) { + // If the new entry has a name, remove any conflicting names which we already have. + if desc.Annotations != nil && desc.Annotations[imgspecv1.AnnotationRefName] != "" { + // The name is being set on a new entry, so remove any older ones that had the same name. + // We might be storing an index and all of its component images, and we'll want to attach + // the name to the last one, which is the index. 
+ for i, manifest := range d.index.Manifests { + if manifest.Annotations[imgspecv1.AnnotationRefName] == desc.Annotations[imgspecv1.AnnotationRefName] { + delete(d.index.Manifests[i].Annotations, imgspecv1.AnnotationRefName) + break + } + } + } + // If it has the same digest as another entry in the index, we already overwrote the file, + // so just pick up the other information. + for i, manifest := range d.index.Manifests { + if manifest.Digest == desc.Digest && manifest.Annotations[imgspecv1.AnnotationRefName] == "" { + // Replace it completely. + d.index.Manifests[i] = *desc + return + } + } + // It's a new entry to be added to the index. Use slices.Clone() to avoid a remote dependency on how d.index was created. + d.index.Manifests = append(slices.Clone(d.index.Manifests), *desc) +} + +// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before CommitWithOptions() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed) +func (d *ociImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error { + layoutBytes, err := json.Marshal(imgspecv1.ImageLayout{ + Version: imgspecv1.ImageLayoutVersion, + }) + if err != nil { + return err + } + if err := os.WriteFile(d.ref.ociLayoutPath(), layoutBytes, 0644); err != nil { + return err + } + indexJSON, err := json.Marshal(d.index) + if err != nil { + return err + } + return os.WriteFile(d.ref.indexPath(), indexJSON, 0644) +} + +// PutBlobFromLocalFileOption is unused but may receive functionality in the future. +type PutBlobFromLocalFileOption struct{} + +// PutBlobFromLocalFile arranges the data from path to be used as blob with digest. +// It computes, and returns, the digest and size of the used file. +// +// This function can be used instead of dest.PutBlob() where the ImageDestination requires PutBlob() to be called. 
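On commit, the destination writes exactly two top-level files next to blobs/: the oci-layout version marker and index.json. What those serialize to, using the real image-spec structs (the manifest entry below is made up for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/opencontainers/go-digest"
	imgspec "github.com/opencontainers/image-spec/specs-go"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// The oci-layout marker file.
	layout, _ := json.Marshal(imgspecv1.ImageLayout{Version: imgspecv1.ImageLayoutVersion})
	fmt.Println(string(layout)) // {"imageLayoutVersion":"1.0.0"}

	// index.json with one (made-up) manifest entry carrying a ref-name annotation.
	index, _ := json.MarshalIndent(imgspecv1.Index{
		Versioned: imgspec.Versioned{SchemaVersion: 2},
		Manifests: []imgspecv1.Descriptor{{
			MediaType:   imgspecv1.MediaTypeImageManifest,
			Digest:      digest.FromString("example manifest"),
			Size:        16,
			Annotations: map[string]string{imgspecv1.AnnotationRefName: "latest"},
		}},
	}, "", "  ")
	fmt.Println(string(index))
}
```

This matches the addManifest bookkeeping above: the ref-name annotation is the key that must stay unique across index entries.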
+func PutBlobFromLocalFile(ctx context.Context, dest types.ImageDestination, file string, options ...PutBlobFromLocalFileOption) (_ digest.Digest, _ int64, retErr error) { + d, ok := dest.(*ociImageDestination) + if !ok { + return "", -1, errors.New("caller error: PutBlobFromLocalFile called with a non-oci: destination") + } + + succeeded := false + blobFileClosed := false + blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob") + if err != nil { + return "", -1, err + } + defer func() { + if !blobFileClosed { + closeErr := blobFile.Close() + if retErr == nil { + retErr = closeErr + } + } + if !succeeded { + os.Remove(blobFile.Name()) + } + }() + + srcFile, err := os.Open(file) + if err != nil { + return "", -1, err + } + defer srcFile.Close() + + err = fileutils.ReflinkOrCopy(srcFile, blobFile) + if err != nil { + return "", -1, err + } + + _, err = blobFile.Seek(0, io.SeekStart) + if err != nil { + return "", -1, err + } + blobDigest, err := digest.FromReader(blobFile) + if err != nil { + return "", -1, err + } + + fileInfo, err := blobFile.Stat() + if err != nil { + return "", -1, err + } + + if err := d.blobFileSyncAndRename(blobFile, blobDigest, &blobFileClosed); err != nil { + return "", -1, err + } + + succeeded = true + return blobDigest, fileInfo.Size(), nil +} + +func ensureDirectoryExists(path string) error { + if err := fileutils.Exists(path); err != nil && errors.Is(err, fs.ErrNotExist) { + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + } + return nil +} + +// ensureParentDirectoryExists ensures the parent of the supplied path exists. +func ensureParentDirectoryExists(path string) error { + return ensureDirectoryExists(filepath.Dir(path)) +} + +// indexExists checks whether the index location specified in the OCI reference exists. +// The implementation is opinionated, since in case of unexpected errors false is returned +func indexExists(ref ociReference) bool { + err := fileutils.Exists(ref.indexPath()) + if err == nil { + return true + } + if os.IsNotExist(err) { + return false + } + return true +} diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go new file mode 100644 index 0000000000..49c070c21f --- /dev/null +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go @@ -0,0 +1,240 @@ +package layout + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "strconv" + + "github.com/containers/image/v5/internal/imagesource/impl" + "github.com/containers/image/v5/internal/imagesource/stubs" + "github.com/containers/image/v5/internal/manifest" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/pkg/tlsclientconfig" + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/fileutils" + "github.com/docker/go-connections/tlsconfig" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ImageNotFoundError is used when the OCI structure, in principle, exists and seems valid enough, +// but nothing matches the “image” part of the provided reference. +type ImageNotFoundError struct { + ref ociReference + // We may make members public, or add methods, in the future. 
+} + +func (e ImageNotFoundError) Error() string { + return fmt.Sprintf("no descriptor found for reference %q", e.ref.image) +} + +type ociImageSource struct { + impl.Compat + impl.PropertyMethodsInitialize + impl.NoSignatures + impl.DoesNotAffectLayerInfosForCopy + stubs.NoGetBlobAtInitialize + + ref ociReference + index *imgspecv1.Index + descriptor imgspecv1.Descriptor + client *http.Client + sharedBlobDir string +} + +// newImageSource returns an ImageSource for reading from an existing directory. +func newImageSource(sys *types.SystemContext, ref ociReference) (private.ImageSource, error) { + tr := tlsclientconfig.NewTransport() + tr.TLSClientConfig = tlsconfig.ServerDefault() + + if sys != nil && sys.OCICertPath != "" { + if err := tlsclientconfig.SetupCertificates(sys.OCICertPath, tr.TLSClientConfig); err != nil { + return nil, err + } + tr.TLSClientConfig.InsecureSkipVerify = sys.OCIInsecureSkipTLSVerify + } + + client := &http.Client{} + client.Transport = tr + descriptor, _, err := ref.getManifestDescriptor() + if err != nil { + return nil, err + } + index, err := ref.getIndex() + if err != nil { + return nil, err + } + s := &ociImageSource{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + HasThreadSafeGetBlob: false, + }), + NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref), + + ref: ref, + index: index, + descriptor: descriptor, + client: client, + } + if sys != nil { + // TODO(jonboulle): check dir existence? + s.sharedBlobDir = sys.OCISharedBlobDirPath + } + s.Compat = impl.AddCompat(s) + return s, nil +} + +// Reference returns the reference used to set up this source. +func (s *ociImageSource) Reference() types.ImageReference { + return s.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +func (s *ociImageSource) Close() error { + s.client.CloseIdleConnections() + return nil +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + var dig digest.Digest + var mimeType string + var err error + + if instanceDigest == nil { + dig = s.descriptor.Digest + mimeType = s.descriptor.MediaType + } else { + dig = *instanceDigest + for _, md := range s.index.Manifests { + if md.Digest == dig { + mimeType = md.MediaType + break + } + } + } + + manifestPath, err := s.ref.blobPath(dig, s.sharedBlobDir) + if err != nil { + return nil, "", err + } + + m, err := os.ReadFile(manifestPath) + if err != nil { + return nil, "", err + } + if mimeType == "" { + mimeType = manifest.GuessMIMEType(m) + } + + return m, mimeType, nil +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
+func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + if len(info.URLs) != 0 { + r, s, err := s.getExternalBlob(ctx, info.URLs) + if err != nil { + return nil, 0, err + } else if r != nil { + return r, s, nil + } + } + + path, err := s.ref.blobPath(info.Digest, s.sharedBlobDir) + if err != nil { + return nil, 0, err + } + + r, err := os.Open(path) + if err != nil { + return nil, 0, err + } + fi, err := r.Stat() + if err != nil { + return nil, 0, err + } + return r, fi.Size(), nil +} + +// getExternalBlob returns the reader of the first available blob URL from urls, which must not be empty. +// This function can return nil reader when no url is supported by this function. In this case, the caller +// should fallback to fetch the non-external blob (i.e. pull from the registry). +func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) { + if len(urls) == 0 { + return nil, 0, errors.New("internal error: getExternalBlob called with no URLs") + } + + errWrap := errors.New("failed fetching external blob from all urls") + hasSupportedURL := false + for _, u := range urls { + if u, err := url.Parse(u); err != nil || (u.Scheme != "http" && u.Scheme != "https") { + continue // unsupported url. skip this url. + } + hasSupportedURL = true + req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil) + if err != nil { + errWrap = fmt.Errorf("fetching %q failed %s: %w", u, err.Error(), errWrap) + continue + } + + resp, err := s.client.Do(req) + if err != nil { + errWrap = fmt.Errorf("fetching %q failed %s: %w", u, err.Error(), errWrap) + continue + } + + if resp.StatusCode != http.StatusOK { + resp.Body.Close() + errWrap = fmt.Errorf("fetching %q failed, response code not 200: %w", u, errWrap) + continue + } + + return resp.Body, getBlobSize(resp), nil + } + if !hasSupportedURL { + return nil, 0, nil // fallback to non-external blob + } + + return nil, 0, errWrap +} + +func getBlobSize(resp *http.Response) int64 { + size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) + if err != nil { + size = -1 + } + return size +} + +// GetLocalBlobPath returns the local path to the blob file with the given digest. +// The returned path is checked for existence so when a non existing digest is +// given an error will be returned. +// +// Important: The returned path must be treated as read only, writing the file will +// corrupt the oci layout as the digest no longer matches. 
+func GetLocalBlobPath(ctx context.Context, src types.ImageSource, digest digest.Digest) (string, error) { + s, ok := src.(*ociImageSource) + if !ok { + return "", errors.New("caller error: GetLocalBlobPath called with a non-oci: source") + } + + path, err := s.ref.blobPath(digest, s.sharedBlobDir) + if err != nil { + return "", err + } + if err := fileutils.Exists(path); err != nil { + return "", err + } + + return path, nil +} diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go new file mode 100644 index 0000000000..832f89080e --- /dev/null +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go @@ -0,0 +1,304 @@ +package layout + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/containers/image/v5/directory/explicitfilepath" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/image" + "github.com/containers/image/v5/internal/manifest" + "github.com/containers/image/v5/oci/internal" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +func init() { + transports.Register(Transport) +} + +var ( + // Transport is an ImageTransport for OCI directories. + Transport = ociTransport{} + + // ErrMoreThanOneImage is an error returned when the manifest includes + // more than one image and the user should choose which one to use. + ErrMoreThanOneImage = errors.New("more than one image in oci, choose an image") +) + +type ociTransport struct{} + +func (t ociTransport) Name() string { + return "oci" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (t ociTransport) ParseReference(reference string) (types.ImageReference, error) { + return ParseReference(reference) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error { + return internal.ValidateScope(scope) +} + +// ociReference is an ImageReference for OCI directory paths. +type ociReference struct { + // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time! + // Either of the paths may point to a different, or no, inode over time. resolvedDir may contain symbolic links, and so on. + + // Generally we follow the intent of the user, and use the "dir" member for filesystem operations (e.g. the user can use a relative path to avoid + // being exposed to symlinks and renames in the parent directories to the working directory). + // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.) + dir string // As specified by the user. May be relative, contain symlinks, etc. + resolvedDir string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces. 
+ // If image=="" && sourceIndex==-1, it means the "only image" in the index.json is used in the case it is a source + // for destinations, the image name annotation "image.ref.name" is not added to the index.json. + // + // Must not be set if sourceIndex is set (the value is not -1). + image string + // If not -1, a zero-based index of an image in the manifest index. Valid only for sources. + // Must not be set if image is set. + sourceIndex int +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference. +func ParseReference(reference string) (types.ImageReference, error) { + dir, image, index, err := internal.ParseReferenceIntoElements(reference) + if err != nil { + return nil, err + } + return newReference(dir, image, index) +} + +// newReference returns an OCI reference for a directory, and an image name annotation or sourceIndex. +// +// If sourceIndex==-1, the index will not be valid to point out the source image, only image will be used. +// We do not expose an API supplying the resolvedDir; we could, but recomputing it +// is generally cheap enough that we prefer being confident about the properties of resolvedDir. +func newReference(dir, image string, sourceIndex int) (types.ImageReference, error) { + resolved, err := explicitfilepath.ResolvePathToFullyExplicit(dir) + if err != nil { + return nil, err + } + + if err := internal.ValidateOCIPath(dir); err != nil { + return nil, err + } + + if err = internal.ValidateImageName(image); err != nil { + return nil, err + } + + if sourceIndex != -1 && sourceIndex < 0 { + return nil, fmt.Errorf("Invalid oci: layout reference: index @%d must not be negative", sourceIndex) + } + if sourceIndex != -1 && image != "" { + return nil, fmt.Errorf("Invalid oci: layout reference: cannot use both an image %s and a source index @%d", image, sourceIndex) + } + return ociReference{dir: dir, resolvedDir: resolved, image: image, sourceIndex: sourceIndex}, nil +} + +// NewIndexReference returns an OCI reference for a path and a zero-based source manifest index. +func NewIndexReference(dir string, sourceIndex int) (types.ImageReference, error) { + if sourceIndex < 0 { + return nil, fmt.Errorf("invalid call to NewIndexReference with negative index %d", sourceIndex) + } + return newReference(dir, "", sourceIndex) +} + +// NewReference returns an OCI reference for a directory and an optional image name annotation (if not ""). +func NewReference(dir, image string) (types.ImageReference, error) { + return newReference(dir, image, -1) +} + +func (ref ociReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. +func (ref ociReference) StringWithinTransport() string { + if ref.sourceIndex == -1 { + return fmt.Sprintf("%s:%s", ref.dir, ref.image) + } + return fmt.Sprintf("%s:@%d", ref.dir, ref.sourceIndex) +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. 
!reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref ociReference) DockerReference() reference.Named {
+	return nil
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref ociReference) PolicyConfigurationIdentity() string {
+	// NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the
+	// same image and the two can’t be statically disambiguated. Using at least the repository directory is
+	// less granular but hopefully still useful.
+	return ref.resolvedDir
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref ociReference) PolicyConfigurationNamespaces() []string {
+	res := []string{}
+	path := ref.resolvedDir
+	for {
+		lastSlash := strings.LastIndex(path, "/")
+		// Note that we do not include "/"; it is redundant with the default "" global default,
+		// and rejected by ociTransport.ValidatePolicyConfigurationScope above.
+		if lastSlash == -1 || path == "/" {
+			break
+		}
+		res = append(res, path)
+		path = path[:lastSlash]
+	}
+	return res
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref ociReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+	return image.FromReference(ctx, sys, ref)
+}
+
+// getIndex returns a pointer to the index referenced by this ociReference. If an error occurs opening the index,
+// nil is returned together with the error.
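The namespace walk above yields every ancestor directory of the resolved layout path, most specific first, stopping before "/" so that the generic "" default stays distinct. Its behavior, extracted into a standalone function:

```go
package main

import (
	"fmt"
	"strings"
)

// namespaces mirrors PolicyConfigurationNamespaces: each parent directory of the
// resolved layout path, most specific first, excluding "/" and the implicit "".
func namespaces(resolvedDir string) []string {
	res := []string{}
	path := resolvedDir
	for {
		lastSlash := strings.LastIndex(path, "/")
		if lastSlash == -1 || path == "/" {
			break
		}
		res = append(res, path)
		path = path[:lastSlash]
	}
	return res
}

func main() {
	fmt.Println(namespaces("/srv/layouts/app")) // [/srv/layouts/app /srv/layouts /srv]
}
```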
+func (ref ociReference) getIndex() (*imgspecv1.Index, error) { + return parseIndex(ref.indexPath()) +} + +func parseIndex(path string) (*imgspecv1.Index, error) { + return parseJSON[imgspecv1.Index](path) +} + +func parseJSON[T any](path string) (*T, error) { + content, err := os.Open(path) + if err != nil { + return nil, err + } + defer content.Close() + + obj := new(T) + if err := json.NewDecoder(content).Decode(obj); err != nil { + return nil, err + } + return obj, nil +} + +func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, int, error) { + index, err := ref.getIndex() + if err != nil { + return imgspecv1.Descriptor{}, -1, err + } + + switch { + case ref.image != "" && ref.sourceIndex != -1: // Coverage: newReference refuses to create such references. + return imgspecv1.Descriptor{}, -1, fmt.Errorf("Internal error: Cannot have both ref %s and source index @%d", + ref.image, ref.sourceIndex) + + case ref.sourceIndex != -1: + if ref.sourceIndex >= len(index.Manifests) { + return imgspecv1.Descriptor{}, -1, fmt.Errorf("index %d is too large, only %d entries available", ref.sourceIndex, len(index.Manifests)) + } + return index.Manifests[ref.sourceIndex], ref.sourceIndex, nil + + case ref.image != "": + // if image specified, look through all manifests for a match + var unsupportedMIMETypes []string + for i, md := range index.Manifests { + if refName, ok := md.Annotations[imgspecv1.AnnotationRefName]; ok && refName == ref.image { + if md.MediaType == imgspecv1.MediaTypeImageManifest || md.MediaType == imgspecv1.MediaTypeImageIndex || md.MediaType == manifest.DockerV2Schema2MediaType || md.MediaType == manifest.DockerV2ListMediaType { + return md, i, nil + } + unsupportedMIMETypes = append(unsupportedMIMETypes, md.MediaType) + } + } + if len(unsupportedMIMETypes) != 0 { + return imgspecv1.Descriptor{}, -1, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes) + } + return imgspecv1.Descriptor{}, -1, ImageNotFoundError{ref} + + default: + // return manifest if only one image is in the oci directory + if len(index.Manifests) != 1 { + // ask user to choose image when more than one image in the oci directory + return imgspecv1.Descriptor{}, -1, ErrMoreThanOneImage + } + return index.Manifests[0], 0, nil + } +} + +// LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name +// when pulling an image +func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) { + ociRef, ok := imgRef.(ociReference) + if !ok { + return imgspecv1.Descriptor{}, errors.New("error typecasting, need type ociRef") + } + md, _, err := ociRef.getManifestDescriptor() + return md, err +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref ociReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. +func (ref ociReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return newImageDestination(sys, ref) +} + +// ociLayoutPath returns a path for the oci-layout within a directory using OCI conventions. 
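parseJSON is a small generics win: one decoder covers imgspecv1.Index, imgspecv1.Manifest, and anything else the transport needs to read. The same helper, runnable in isolation (config is a made-up type for the demo):

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// parseJSON mirrors the generic helper above: open path and decode it into a *T.
func parseJSON[T any](path string) (*T, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	obj := new(T)
	if err := json.NewDecoder(f).Decode(obj); err != nil {
		return nil, err
	}
	return obj, nil
}

type config struct {
	Name string `json:"name"`
}

func main() {
	path := filepath.Join(os.TempDir(), "demo.json")
	if err := os.WriteFile(path, []byte(`{"name":"demo"}`), 0o644); err != nil {
		panic(err)
	}
	c, err := parseJSON[config](path)
	fmt.Println(c.Name, err)
}
```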
+func (ref ociReference) ociLayoutPath() string { + return filepath.Join(ref.dir, imgspecv1.ImageLayoutFile) +} + +// indexPath returns a path for the index.json within a directory using OCI conventions. +func (ref ociReference) indexPath() string { + return filepath.Join(ref.dir, imgspecv1.ImageIndexFile) +} + +// blobPath returns a path for a blob within a directory using OCI image-layout conventions. +func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (string, error) { + if err := digest.Validate(); err != nil { + return "", fmt.Errorf("unexpected digest reference %s: %w", digest, err) + } + var blobDir string + if sharedBlobDir != "" { + blobDir = sharedBlobDir + } else { + blobDir = filepath.Join(ref.dir, imgspecv1.ImageBlobsDir) + } + return filepath.Join(blobDir, digest.Algorithm().String(), digest.Encoded()), nil +} diff --git a/vendor/github.com/containers/image/v5/oci/layout/reader.go b/vendor/github.com/containers/image/v5/oci/layout/reader.go new file mode 100644 index 0000000000..112db2d705 --- /dev/null +++ b/vendor/github.com/containers/image/v5/oci/layout/reader.go @@ -0,0 +1,52 @@ +package layout + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + + "github.com/containers/image/v5/types" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// This file is named reader.go for consistency with other transports’ +// handling of “image containers”, but we don’t actually need a stateful reader object. + +// ListResult wraps the image reference and the manifest for loading +type ListResult struct { + Reference types.ImageReference + ManifestDescriptor imgspecv1.Descriptor +} + +// List returns a slice of manifests included in the archive +func List(dir string) ([]ListResult, error) { + var res []ListResult + + indexJSON, err := os.ReadFile(filepath.Join(dir, imgspecv1.ImageIndexFile)) + if err != nil { + return nil, err + } + var index imgspecv1.Index + if err := json.Unmarshal(indexJSON, &index); err != nil { + return nil, err + } + + for manifestIndex, md := range index.Manifests { + refName := md.Annotations[imgspecv1.AnnotationRefName] + index := -1 + if refName == "" { + index = manifestIndex + } + ref, err := newReference(dir, refName, index) + if err != nil { + return nil, fmt.Errorf("error creating image reference: %w", err) + } + reference := ListResult{ + Reference: ref, + ManifestDescriptor: md, + } + res = append(res, reference) + } + return res, nil +} diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go new file mode 100644 index 0000000000..b413ec5131 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go @@ -0,0 +1,88 @@ +package blobinfocache + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/containers/image/v5/internal/rootless" + "github.com/containers/image/v5/pkg/blobinfocache/memory" + "github.com/containers/image/v5/pkg/blobinfocache/sqlite" + "github.com/containers/image/v5/types" + "github.com/sirupsen/logrus" +) + +const ( + // blobInfoCacheFilename is the file name used for blob info caches. + // If the format changes in an incompatible way, increase the version number. + blobInfoCacheFilename = "blob-info-cache-v1.sqlite" + // systemBlobInfoCacheDir is the directory containing the blob info cache (in blobInfocacheFilename) for root-running processes. 
+	systemBlobInfoCacheDir = "/var/lib/containers/cache"
+)
+
+// blobInfoCacheDir returns a path to a blob info cache appropriate for sys and euid.
+// euid is used so that (sudo …) does not write root-owned files into the unprivileged users’ home directory.
+func blobInfoCacheDir(sys *types.SystemContext, euid int) (string, error) {
+	if sys != nil && sys.BlobInfoCacheDir != "" {
+		return sys.BlobInfoCacheDir, nil
+	}
+
+	// FIXME? On Windows, os.Geteuid() returns -1. What should we do? Right now we treat it as unprivileged
+	// and fail (fall back to memory-only) if neither HOME nor XDG_DATA_HOME is set, which is, at least, safe.
+	if euid == 0 {
+		if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
+			return filepath.Join(sys.RootForImplicitAbsolutePaths, systemBlobInfoCacheDir), nil
+		}
+		return systemBlobInfoCacheDir, nil
+	}
+
+	// This is intended to mirror the GraphRoot determination in github.com/containers/libpod/pkg/util.GetRootlessStorageOpts.
+	dataDir := os.Getenv("XDG_DATA_HOME")
+	if dataDir == "" {
+		home := os.Getenv("HOME")
+		if home == "" {
+			return "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty")
+		}
+		dataDir = filepath.Join(home, ".local", "share")
+	}
+	return filepath.Join(dataDir, "containers", "cache"), nil
+}
+
+// DefaultCache returns the default BlobInfoCache implementation appropriate for sys.
+func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
+	dir, err := blobInfoCacheDir(sys, rootless.GetRootlessEUID())
+	if err != nil {
+		logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename)
+		return memory.New()
+	}
+	path := filepath.Join(dir, blobInfoCacheFilename)
+	if err := os.MkdirAll(dir, 0700); err != nil {
+		logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", path, err)
+		return memory.New()
+	}
+
+	// It might make sense to keep a single sqlite cache object, and a single initialized sqlite connection, open
+	// as a global singleton, for the vast majority of callers who don’t override the cache location.
+	// OTOH that would keep a file descriptor open forever, even for long-term callers who copy images rarely,
+	// and the performance benefit of this over using an Open()/Close() pair for a single image copy is < 10%.
+
+	cache, err := sqlite.New(path)
+	if err != nil {
+		logrus.Debugf("Error creating a SQLite blob info cache at %s, using a memory-only cache: %v", path, err)
+		return memory.New()
+	}
+	logrus.Debugf("Using SQLite blob info cache at %s", path)
+	return cache
+}
+
+// CleanupDefaultCache removes the blob info cache directory.
+// It deletes the cache directory but it does not affect any file or memory buffer currently
+// in use.
+func CleanupDefaultCache(sys *types.SystemContext) error {
+	dir, err := blobInfoCacheDir(sys, rootless.GetRootlessEUID())
+	if err != nil {
+		// Mirror the DefaultCache behavior that does not fail in this case
+		return nil
+	}
+	return os.RemoveAll(dir)
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go
new file mode 100644
index 0000000000..d73aafbdb1
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go
@@ -0,0 +1,244 @@
+// Package prioritize provides utilities for filtering and prioritizing locations in
+// types.BlobInfoCache.CandidateLocations.
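+//
+// (Illustrative ordering, using hypothetical digests: given candidates for the
+// primary digest P, an unrelated digest O, and the uncompressed digest U, the
+// prioritized result lists P entries first (most recent first), then O entries
+// by recency, and U entries last; see candidateSortState.compare below.)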
+package prioritize + +import ( + "cmp" + "slices" + "time" + + "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/internal/manifest" + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +// replacementAttempts is the number of blob replacement candidates with known location returned by destructivelyPrioritizeReplacementCandidates, +// and therefore ultimately by types.BlobInfoCache.CandidateLocations. +// This is a heuristic/guess, and could well use a different value. +const replacementAttempts = 5 + +// replacementUnknownLocationAttempts is the number of blob replacement candidates with unknown Location returned by destructivelyPrioritizeReplacementCandidates, +// and therefore ultimately by blobinfocache.BlobInfoCache2.CandidateLocations2. +// This is a heuristic/guess, and could well use a different value. +const replacementUnknownLocationAttempts = 2 + +// CandidateTemplate is a subset of BICReplacementCandidate2 with data related to a specific digest, +// which can be later combined with information about a location. +type CandidateTemplate struct { + digest digest.Digest + compressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed + compressionAlgorithm *compression.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed + compressionAnnotations map[string]string // If necessary, annotations necessary to use compressionAlgorithm +} + +// CandidateTemplateWithCompression returns a CandidateTemplate if a blob with data is acceptable +// for a CandidateLocations* call with v2Options. +// +// v2Options can be set to nil if the call is CandidateLocations (i.e. compression is not required to be known); +// if not nil, the call is assumed to be CandidateLocations2. +func CandidateTemplateWithCompression(v2Options *blobinfocache.CandidateLocations2Options, digest digest.Digest, data blobinfocache.DigestCompressorData) *CandidateTemplate { + if v2Options == nil { + return &CandidateTemplate{ // Anything goes. The compressionOperation, compressionAlgorithm and compressionAnnotations values are not used. + digest: digest, + } + } + + requiredCompression := "nil" + if v2Options.RequiredCompression != nil { + requiredCompression = v2Options.RequiredCompression.Name() + } + switch data.BaseVariantCompressor { + case blobinfocache.Uncompressed: + if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{ + PossibleManifestFormats: v2Options.PossibleManifestFormats, + RequiredCompression: v2Options.RequiredCompression, + }, nil) { + logrus.Debugf("Ignoring BlobInfoCache record of digest %q, uncompressed format does not match required %s or MIME types %#v", + digest.String(), requiredCompression, v2Options.PossibleManifestFormats) + return nil + } + return &CandidateTemplate{ + digest: digest, + compressionOperation: types.Decompress, + compressionAlgorithm: nil, + compressionAnnotations: nil, + } + case blobinfocache.UnknownCompression: + logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unknown compression", digest.String()) + return nil // Not allowed with CandidateLocations2 + default: + // See if we can use the specific variant, first. 
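+		// (For example, a zstd:chunked blob would typically be recorded with
+		// BaseVariantCompressor "zstd" and SpecificVariantCompressor "zstd:chunked",
+		// plus the annotations needed to consume the chunked variant.)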
+ if data.SpecificVariantCompressor != blobinfocache.UnknownCompression { + algo, err := compression.AlgorithmByName(data.SpecificVariantCompressor) + if err != nil { + logrus.Debugf("Not considering unrecognized specific compression variant %q for BlobInfoCache record of digest %q: %v", + data.SpecificVariantCompressor, digest.String(), err) + } else { + if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{ + PossibleManifestFormats: v2Options.PossibleManifestFormats, + RequiredCompression: v2Options.RequiredCompression, + }, &algo) { + logrus.Debugf("Ignoring specific compression variant %q for BlobInfoCache record of digest %q, it does not match required %s or MIME types %#v", + data.SpecificVariantCompressor, digest.String(), requiredCompression, v2Options.PossibleManifestFormats) + } else { + return &CandidateTemplate{ + digest: digest, + compressionOperation: types.Compress, + compressionAlgorithm: &algo, + compressionAnnotations: data.SpecificVariantAnnotations, + } + } + } + } + + // Try the base variant. + algo, err := compression.AlgorithmByName(data.BaseVariantCompressor) + if err != nil { + logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unrecognized compression %q: %v", + digest.String(), data.BaseVariantCompressor, err) + return nil // The BICReplacementCandidate2.CompressionAlgorithm field is required + } + if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{ + PossibleManifestFormats: v2Options.PossibleManifestFormats, + RequiredCompression: v2Options.RequiredCompression, + }, &algo) { + logrus.Debugf("Ignoring BlobInfoCache record of digest %q, compression %q does not match required %s or MIME types %#v", + digest.String(), data.BaseVariantCompressor, requiredCompression, v2Options.PossibleManifestFormats) + return nil + } + return &CandidateTemplate{ + digest: digest, + compressionOperation: types.Compress, + compressionAlgorithm: &algo, + compressionAnnotations: nil, + } + } +} + +// CandidateWithTime is the input to types.BICReplacementCandidate prioritization. +type CandidateWithTime struct { + candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate + lastSeen time.Time // Time the candidate was last known to exist (either read or written) (not set for Candidate.UnknownLocation) +} + +// CandidateWithLocation returns a complete CandidateWithTime combining (template from CandidateTemplateWithCompression, location, lastSeen) +func (template CandidateTemplate) CandidateWithLocation(location types.BICLocationReference, lastSeen time.Time) CandidateWithTime { + return CandidateWithTime{ + candidate: blobinfocache.BICReplacementCandidate2{ + Digest: template.digest, + CompressionOperation: template.compressionOperation, + CompressionAlgorithm: template.compressionAlgorithm, + CompressionAnnotations: template.compressionAnnotations, + UnknownLocation: false, + Location: location, + }, + lastSeen: lastSeen, + } +} + +// CandidateWithUnknownLocation returns a complete CandidateWithTime for a template from CandidateTemplateWithCompression and an unknown location. 
+func (template CandidateTemplate) CandidateWithUnknownLocation() CandidateWithTime { + return CandidateWithTime{ + candidate: blobinfocache.BICReplacementCandidate2{ + Digest: template.digest, + CompressionOperation: template.compressionOperation, + CompressionAlgorithm: template.compressionAlgorithm, + CompressionAnnotations: template.compressionAnnotations, + UnknownLocation: true, + Location: types.BICLocationReference{Opaque: ""}, + }, + lastSeen: time.Time{}, + } +} + +// candidateSortState is a closure for a comparison used by slices.SortFunc on candidates to prioritize, +// along with the specially-treated digest values relevant to the ordering. +type candidateSortState struct { + primaryDigest digest.Digest // The digest the user actually asked for + uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest +} + +func (css *candidateSortState) compare(xi, xj CandidateWithTime) int { + // primaryDigest entries come first, more recent first. + // uncompressedDigest entries, if uncompressedDigest is set and != primaryDigest, come last, more recent entry first. + // Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order) + + // First, deal with the primaryDigest/uncompressedDigest cases: + if xi.candidate.Digest != xj.candidate.Digest { + // - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter + if xi.candidate.Digest == css.primaryDigest { + return -1 + } + if xj.candidate.Digest == css.primaryDigest { + return 1 + } + if css.uncompressedDigest != "" { + if xi.candidate.Digest == css.uncompressedDigest { + return 1 + } + if xj.candidate.Digest == css.uncompressedDigest { + return -1 + } + } + } else { // xi.Candidate.Digest == xj.Candidate.Digest + // The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time + if xi.candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.candidate.Digest == css.uncompressedDigest) { + return -xi.lastSeen.Compare(xj.lastSeen) + } + } + + // Neither of the digests are primaryDigest/uncompressedDigest: + if cmp := xi.lastSeen.Compare(xj.lastSeen); cmp != 0 { // Order primarily by time + return -cmp + } + // Fall back to digest, if timestamps end up _exactly_ the same (how?!) + return cmp.Compare(xi.candidate.Digest, xj.candidate.Digest) +} + +// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the +// number of entries to limit for known and unknown location separately, only to make testing simpler. +func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, totalLimit int, noLocationLimit int) []blobinfocache.BICReplacementCandidate2 { + // split unknown candidates and known candidates + // and limit them separately. + var knownLocationCandidates []CandidateWithTime + var unknownLocationCandidates []CandidateWithTime + // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should + // compare equal. 
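+	// (Worked example with hypothetical inputs: given totalLimit=5, noLocationLimit=2,
+	// 4 known-location and 3 unknown-location candidates, the code below keeps all
+	// 4 known candidates and min(2, 5-4, 3) = 1 unknown-location candidate.)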
+	slices.SortFunc(cs, (&candidateSortState{
+		primaryDigest:      primaryDigest,
+		uncompressedDigest: uncompressedDigest,
+	}).compare)
+	for _, candidate := range cs {
+		if candidate.candidate.UnknownLocation {
+			unknownLocationCandidates = append(unknownLocationCandidates, candidate)
+		} else {
+			knownLocationCandidates = append(knownLocationCandidates, candidate)
+		}
+	}
+
+	knownLocationCandidatesUsed := min(len(knownLocationCandidates), totalLimit)
+	remainingCapacity := totalLimit - knownLocationCandidatesUsed
+	unknownLocationCandidatesUsed := min(noLocationLimit, remainingCapacity, len(unknownLocationCandidates))
+	res := make([]blobinfocache.BICReplacementCandidate2, knownLocationCandidatesUsed)
+	for i := 0; i < knownLocationCandidatesUsed; i++ {
+		res[i] = knownLocationCandidates[i].candidate
+	}
+	// If candidates with unknown location are found, let’s add them to the final list.
+	for i := 0; i < unknownLocationCandidatesUsed; i++ {
+		res = append(res, unknownLocationCandidates[i].candidate)
+	}
+	return res
+}
+
+// DestructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times,
+// along with the primary digest the user actually asked for and the corresponding uncompressed digest (if known, possibly equal to the primary digest); it returns an
+// appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
+//
+// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course
+// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
+func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []blobinfocache.BICReplacementCandidate2 {
+	return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts, replacementUnknownLocationAttempts)
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go
new file mode 100644
index 0000000000..8e513d41e3
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go
@@ -0,0 +1,255 @@
+// Package memory implements an in-memory BlobInfoCache.
+package memory
+
+import (
+	"sync"
+	"time"
+
+	"github.com/containers/image/v5/internal/blobinfocache"
+	"github.com/containers/image/v5/internal/set"
+	"github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
+	"github.com/containers/image/v5/types"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/sirupsen/logrus"
+)
+
+// locationKey only exists to make lookup in knownLocations easier.
+type locationKey struct {
+	transport  string
+	scope      types.BICTransportScope
+	blobDigest digest.Digest
+}
+
+// cache implements an in-memory-only BlobInfoCache.
+type cache struct {
+	mutex sync.Mutex
+	// The following fields can only be accessed with mutex held.
+	uncompressedDigests      map[digest.Digest]digest.Digest
+	uncompressedDigestsByTOC map[digest.Digest]digest.Digest
+	digestsByUncompressed    map[digest.Digest]*set.Set[digest.Digest]                // stores a set of digests for each uncompressed digest
+	knownLocations           map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
+	compressors              map[digest.Digest]blobinfocache.DigestCompressorData     // stores compression data for each digest; BaseVariantCompressor != UnknownCompression
+}
+
+// New returns a BlobInfoCache implementation which is in-memory only.
+//
+// This is primarily intended for tests, but also used as a fallback
+// if blobinfocache.DefaultCache can’t determine, or set up, the
+// location for a persistent cache. Most users should use
+// blobinfocache.DefaultCache instead of calling this directly.
+// Manual users of types.{ImageSource,ImageDestination} might also use
+// this instead of a persistent cache.
+func New() types.BlobInfoCache {
+	return new2()
+}
+
+func new2() *cache {
+	return &cache{
+		uncompressedDigests:      map[digest.Digest]digest.Digest{},
+		uncompressedDigestsByTOC: map[digest.Digest]digest.Digest{},
+		digestsByUncompressed:    map[digest.Digest]*set.Set[digest.Digest]{},
+		knownLocations:           map[locationKey]map[types.BICLocationReference]time.Time{},
+		compressors:              map[digest.Digest]blobinfocache.DigestCompressorData{},
+	}
+}
+
+// Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close().
+// Note that public callers may call the types.BlobInfoCache operations without Open()/Close().
+func (mem *cache) Open() {
+}
+
+// Close destroys state created by Open().
+func (mem *cache) Close() {
+}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (mem *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	return mem.uncompressedDigestLocked(anyDigest)
+}
+
+// uncompressedDigestLocked implements types.BlobInfoCache.UncompressedDigest, but must be called only with mem.mutex held.
+func (mem *cache) uncompressedDigestLocked(anyDigest digest.Digest) digest.Digest {
+	if d, ok := mem.uncompressedDigests[anyDigest]; ok {
+		return d
+	}
+	// Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest.
+	// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
+	// when we already record a (compressed, uncompressed) pair.
+	if s, ok := mem.digestsByUncompressed[anyDigest]; ok && !s.Empty() {
+		return anyDigest
+	}
+	return ""
+}
+
+// RecordDigestUncompressedPair records that uncompressed is the uncompressed version of anyDigest.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
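+//
+// A minimal, hypothetical usage sketch (names invented for illustration), e.g.
+// after decompressing a blob locally and verifying both digests:
+//
+//	bic := memory.New()
+//	bic.RecordDigestUncompressedPair(gzippedDigest, uncompressedDigest)
+//	_ = bic.UncompressedDigest(gzippedDigest) // returns uncompressedDigest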
+func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed {
+		logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
+	}
+	mem.uncompressedDigests[anyDigest] = uncompressed
+
+	anyDigestSet, ok := mem.digestsByUncompressed[uncompressed]
+	if !ok {
+		anyDigestSet = set.New[digest.Digest]()
+		mem.digestsByUncompressed[uncompressed] = anyDigestSet
+	}
+	anyDigestSet.Add(anyDigest)
+}
+
+// UncompressedDigestForTOC returns an uncompressed digest corresponding to tocDigest.
+// Returns "" if the uncompressed digest is unknown.
+func (mem *cache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	if d, ok := mem.uncompressedDigestsByTOC[tocDigest]; ok {
+		return d
+	}
+	return ""
+}
+
+// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (mem *cache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	if previous, ok := mem.uncompressedDigestsByTOC[tocDigest]; ok && previous != uncompressed {
+		logrus.Warnf("Uncompressed digest for blob with TOC %q previously recorded as %q, now %q", tocDigest, previous, uncompressed)
+	}
+	mem.uncompressedDigestsByTOC[tocDigest] = uncompressed
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest}
+	locationScope, ok := mem.knownLocations[key]
+	if !ok {
+		locationScope = map[types.BICLocationReference]time.Time{}
+		mem.knownLocations[key] = locationScope
+	}
+	locationScope[location] = time.Now() // Possibly overwriting an older entry.
+}
+
+// RecordDigestCompressorData records data for the blob with the specified digest.
+// WARNING: Only call this with LOCALLY VERIFIED data:
+//  - don’t record a compressor for a digest just because some remote author claims so
+//    (e.g. because a manifest says so);
+//  - don’t record the non-base variant or annotations if we are not _sure_ that the base variant
+//    and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them
+//    in a manifest)
+//
+// otherwise the cache could be poisoned and cause us to make incorrect edits to type
+// information in a manifest.
+func (mem *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobinfocache.DigestCompressorData) { + mem.mutex.Lock() + defer mem.mutex.Unlock() + if previous, ok := mem.compressors[anyDigest]; ok { + if previous.BaseVariantCompressor != data.BaseVariantCompressor { + logrus.Warnf("Base compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous.BaseVariantCompressor, data.BaseVariantCompressor) + } else if previous.SpecificVariantCompressor != blobinfocache.UnknownCompression && data.SpecificVariantCompressor != blobinfocache.UnknownCompression && + previous.SpecificVariantCompressor != data.SpecificVariantCompressor { + logrus.Warnf("Specific compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous.SpecificVariantCompressor, data.SpecificVariantCompressor) + } + // We don’t check SpecificVariantAnnotations for equality, it’s possible that their generation is not deterministic. + + // Preserve specific variant information if the incoming data does not have it. + if data.BaseVariantCompressor != blobinfocache.UnknownCompression && data.SpecificVariantCompressor == blobinfocache.UnknownCompression && + previous.SpecificVariantCompressor != blobinfocache.UnknownCompression { + data.SpecificVariantCompressor = previous.SpecificVariantCompressor + data.SpecificVariantAnnotations = previous.SpecificVariantAnnotations + } + } + if data.BaseVariantCompressor == blobinfocache.UnknownCompression { + delete(mem.compressors, anyDigest) + return + } + mem.compressors[anyDigest] = data +} + +// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in memory +// with corresponding compression info from mem.compressors, and returns the result of appending +// them to candidates. +// v2Options is not nil if the caller is CandidateLocations2: this allows including candidates with unknown location, and filters out candidates +// with unknown compression. +func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, + v2Options *blobinfocache.CandidateLocations2Options) []prioritize.CandidateWithTime { + compressionData := blobinfocache.DigestCompressorData{ + BaseVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, + } + if v, ok := mem.compressors[digest]; ok { + compressionData = v + } + template := prioritize.CandidateTemplateWithCompression(v2Options, digest, compressionData) + if template == nil { + return candidates + } + locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present + if len(locations) > 0 { + for l, t := range locations { + candidates = append(candidates, template.CandidateWithLocation(l, t)) + } + } else if v2Options != nil { + candidates = append(candidates, template.CandidateWithUnknownLocation()) + } + return candidates +} + +// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused +// within the specified (transport scope) (if they still exist, which is not guaranteed). +// +// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, +// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same +// uncompressed digest. 
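+//
+// An illustrative (hypothetical) caller, reusing a verified layer blob:
+//
+//	for _, c := range bic.CandidateLocations(transport, scope, layerDigest, true) {
+//		// try c.Digest at c.Location before falling back to a full pull
+//	}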
+func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+	return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, nil))
+}
+
+// CandidateLocations2 returns a prioritized, limited number of blobs and their locations (if known)
+// that could possibly be reused within the specified (transport, scope) (if they still
+// exist, which is not guaranteed).
+func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, options blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
+	return mem.candidateLocations(transport, scope, primaryDigest, options.CanSubstitute, &options)
+}
+
+// candidateLocations implements CandidateLocations / CandidateLocations2.
+// v2Options is not nil if the caller is CandidateLocations2.
+func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool,
+	v2Options *blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	res := []prioritize.CandidateWithTime{}
+	res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, v2Options)
+	var uncompressedDigest digest.Digest // = ""
+	if canSubstitute {
+		if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
+			otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
+			if otherDigests != nil {
+				for d := range otherDigests.All() {
+					if d != primaryDigest && d != uncompressedDigest {
+						res = mem.appendReplacementCandidates(res, transport, scope, d, v2Options)
+					}
+				}
+			}
+			if uncompressedDigest != primaryDigest {
+				res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, v2Options)
+			}
+		}
+	}
+	return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go
new file mode 100644
index 0000000000..719c8edaff
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go
@@ -0,0 +1,682 @@
+// Package sqlite implements a BlobInfoCache backed by SQLite.
+package sqlite
+
+import (
+	"database/sql"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/containers/image/v5/internal/blobinfocache"
+	"github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
+	"github.com/containers/image/v5/types"
+	_ "github.com/mattn/go-sqlite3" // Registers the "sqlite3" backend for database/sql
+	"github.com/opencontainers/go-digest"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	// NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade
+	// we can simply start over with a different filename; update blobInfoCacheFilename.
+	// That also means we don’t have to worry about co-existing readers/writers which know different versions of the schema
+	// (which would require compatibility in both directions).
+
+	// Assembled sqlite options used when opening the database.
+	sqliteOptions = "?" +
+		// Deal with timezone automatically.
+ // go-sqlite3 always _records_ timestamps as a text: time in local time + a time zone offset. + // _loc affects how the values are _parsed_: (which timezone is assumed for numeric timestamps or for text which does not specify an offset, or) + // if the time zone offset matches the specified time zone, the timestamp is assumed to be in that time zone / location; + // (otherwise an unnamed time zone carrying just a hard-coded offset, but no location / DST rules is used). + "_loc=auto" + + // Force an fsync after each transaction (https://www.sqlite.org/pragma.html#pragma_synchronous). + "&_sync=FULL" + + // Allow foreign keys (https://www.sqlite.org/pragma.html#pragma_foreign_keys). + // We don’t currently use any foreign keys, but this is a good choice long-term (not default in SQLite only for historical reasons). + "&_foreign_keys=1" + + // Use BEGIN EXCLUSIVE (https://www.sqlite.org/lang_transaction.html); + // i.e. obtain a write lock for _all_ transactions at the transaction start (never use a read lock, + // never upgrade from a read to a write lock - that can fail if multiple read lock owners try to do that simultaneously). + // + // This, together with go-sqlite3’s default for _busy_timeout=5000, means that we should never see a “database is locked” error, + // the database should block on the exclusive lock when starting a transaction, and the problematic case of two simultaneous + // holders of a read lock trying to upgrade to a write lock (and one necessarily failing) is prevented. + // Compare https://github.com/mattn/go-sqlite3/issues/274 . + // + // Ideally the BEGIN / BEGIN EXCLUSIVE decision could be made per-transaction, compare https://github.com/mattn/go-sqlite3/pull/1167 + // or https://github.com/mattn/go-sqlite3/issues/400 . + // The currently-proposed workaround is to create two different SQL “databases” (= connection pools) with different _txlock settings, + // which seems rather wasteful. + "&_txlock=exclusive" +) + +// cache is a BlobInfoCache implementation which uses a SQLite file at the specified path. +type cache struct { + path string + + // The database/sql package says “It is rarely necessary to close a DB.”, and steers towards a long-term *sql.DB connection pool. + // That’s probably very applicable for database-backed services, where the database is the primary data store. That’s not necessarily + // the case for callers of c/image, where image operations might be a small proportion of the total runtime, and the cache is fairly + // incidental even to the image operations. It’s also hard for us to use that model, because the public BlobInfoCache object doesn’t have + // a Close method, so creating a lot of single-use caches could leak data. + // + // Instead, the private BlobInfoCache2 interface provides Open/Close methods, and they are called by c/image/copy.Image. + // This amortizes the cost of opening/closing the SQLite state over a single image copy, while keeping no long-term resources open. + // Some rough benchmarks in https://github.com/containers/image/pull/2092 suggest relative costs on the order of "25" for a single + // *sql.DB left open long-term, "27" for a *sql.DB open for a single image copy, and "40" for opening/closing a *sql.DB for every + // single transaction; so the Open/Close per image copy seems a reasonable compromise (especially compared to the previous implementation, + // somewhere around "700"). + + lock sync.Mutex + // The following fields can only be accessed with lock held. 
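+	// (E.g. with two overlapping Open() calls, the two callers share one *sql.DB;
+	// the pool is closed only when the matching second Close() drops refCount to 0.)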
+ refCount int // number of outstanding Open() calls + db *sql.DB // nil if not set (may happen even if refCount > 0 on errors) +} + +// New returns BlobInfoCache implementation which uses a SQLite file at path. +// +// Most users should call blobinfocache.DefaultCache instead. +func New(path string) (types.BlobInfoCache, error) { + return new2(path) +} + +func new2(path string) (*cache, error) { + db, err := rawOpen(path) + if err != nil { + return nil, fmt.Errorf("initializing blob info cache at %q: %w", path, err) + } + err = func() (retErr error) { // A scope for defer + defer func() { + closeErr := db.Close() + if retErr == nil { + retErr = closeErr + } + }() + // We don’t check the schema before every operation, because that would be costly + // and because we assume schema changes will be handled by using a different path. + return ensureDBHasCurrentSchema(db) + }() + if err != nil { + return nil, err + } + return &cache{ + path: path, + refCount: 0, + db: nil, + }, nil +} + +// rawOpen returns a new *sql.DB for path. +// The caller should arrange for it to be .Close()d. +func rawOpen(path string) (*sql.DB, error) { + // This exists to centralize the use of sqliteOptions. + return sql.Open("sqlite3", path+sqliteOptions) +} + +// Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close(). +// Note that public callers may call the types.BlobInfoCache operations without Open()/Close(). +func (sqc *cache) Open() { + sqc.lock.Lock() + defer sqc.lock.Unlock() + + if sqc.refCount == 0 { + db, err := rawOpen(sqc.path) + if err != nil { + logrus.Warnf("Error opening (previously-successfully-opened) blob info cache at %q: %v", sqc.path, err) + db = nil // But still increase sqc.refCount, because a .Close() will happen + } + sqc.db = db + } + sqc.refCount++ +} + +// Close destroys state created by Open(). +func (sqc *cache) Close() { + sqc.lock.Lock() + defer sqc.lock.Unlock() + + switch sqc.refCount { + case 0: + logrus.Errorf("internal error using pkg/blobinfocache/sqlite.cache: Close() without a matching Open()") + return + case 1: + if sqc.db != nil { + sqc.db.Close() + sqc.db = nil + } + } + sqc.refCount-- +} + +type void struct{} // So that we don’t have to write struct{}{} all over the place + +// transaction calls fn within a read-write transaction in sqc. +func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (_ T, retErr error) { + db, closeDB, err := func() (*sql.DB, func() error, error) { // A scope for defer + sqc.lock.Lock() + defer sqc.lock.Unlock() + + if sqc.db != nil { + return sqc.db, func() error { return nil }, nil + } + db, err := rawOpen(sqc.path) + if err != nil { + return nil, nil, fmt.Errorf("opening blob info cache at %q: %w", sqc.path, err) + } + return db, db.Close, nil + }() + if err != nil { + var zeroRes T // A zero value of T + return zeroRes, err + } + defer func() { + closeErr := closeDB() + if retErr == nil { + retErr = closeErr + } + }() + + return dbTransaction(db, fn) +} + +// dbTransaction calls fn within a read-write transaction in db. +func dbTransaction[T any](db *sql.DB, fn func(tx *sql.Tx) (T, error)) (T, error) { + // Ideally we should be able to distinguish between read-only and read-write transactions, see the _txlock=exclusive discussion. 
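+	// (Usage sketch with a hypothetical fn:
+	//
+	//	v, err := dbTransaction(db, func(tx *sql.Tx) (int, error) { return 1, nil })
+	//
+	// fn runs inside a BEGIN EXCLUSIVE transaction, per the _txlock option above;
+	// it is committed when fn succeeds and rolled back otherwise.)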
+ + var zeroRes T // A zero value of T + + tx, err := db.Begin() + if err != nil { + return zeroRes, fmt.Errorf("beginning transaction: %w", err) + } + succeeded := false + defer func() { + if !succeeded { + if err := tx.Rollback(); err != nil { + logrus.Errorf("Rolling back transaction: %v", err) + } + } + }() + + res, err := fn(tx) + if err != nil { + return zeroRes, err + } + if err := tx.Commit(); err != nil { + return zeroRes, fmt.Errorf("committing transaction: %w", err) + } + + succeeded = true + return res, nil +} + +// querySingleValue executes a SELECT which is expected to return at most one row with a single column. +// It returns (value, true, nil) on success, or (value, false, nil) if no row was returned. +func querySingleValue[T any](tx *sql.Tx, query string, params ...any) (T, bool, error) { + var value T + if err := tx.QueryRow(query, params...).Scan(&value); err != nil { + var zeroValue T // A zero value of T + if errors.Is(err, sql.ErrNoRows) { + return zeroValue, false, nil + } + return zeroValue, false, err + } + return value, true, nil +} + +// ensureDBHasCurrentSchema adds the necessary tables and indices to a database. +// This is typically used when creating a previously-nonexistent database. +// We don’t really anticipate schema migrations; with c/image usually vendored, not using +// shared libraries, migrating a schema on an existing database would affect old-version users. +// Instead, schema changes are likely to be implemented by using a different cache file name, +// and leaving existing caches around for old users. +func ensureDBHasCurrentSchema(db *sql.DB) error { + // Considered schema design alternatives: + // + // (Overall, considering the overall network latency and disk I/O costs of many-megabyte layer pulls which are happening while referring + // to the blob info cache, it seems reasonable to prioritize readability over microoptimization of this database.) + // + // * This schema uses the text representation of digests. + // + // We use the fairly wasteful text with hexadecimal digits because digest.Digest does not define a binary representation; + // and the way digest.Digest.Hex() is deprecated in favor of digest.Digest.Encoded(), and the way digest.Algorithm + // is documented to “define the string encoding” suggests that assuming a hexadecimal representation and turning that + // into binary ourselves is not a good idea in general; we would have to special-case the currently-known algorithm + // — and that would require us to implement two code paths, one of them basically never exercised / never tested. + // + // * There are two separate items for recording the uncompressed digest and digest compressors. + // Alternatively, we could have a single "digest facts" table with NULLable columns. + // + // The way the BlobInfoCache API works, we are only going to write one value at a time, so + // sharing a table would not be any more efficient for writes (same number of lookups, larger row tuples). + // Reads in candidateLocations would not be more efficient either, the searches in DigestCompressors and DigestUncompressedPairs + // do not coincide (we want a compressor for every candidate, but the uncompressed digest only for the primary digest; and then + // we search in DigestUncompressedPairs by uncompressed digest, not by the primary key). + // + // Also, using separate items allows the single-item writes to be done using a simple INSERT OR REPLACE, instead of having to + // do a more verbose ON CONFLICT(…) DO UPDATE SET … = …. 
+	//
+	// * Joins (the two that exist in appendReplacementCandidates) are based on the text representation of digests.
+	//
+	//   Using integer primary keys might make the joins themselves a bit more efficient, but then we would need to involve an extra
+	//   join to translate from/to the user-provided digests anyway. If anything, that extra join (potentially more btree lookups)
+	//   is probably costlier than comparing a few more bytes of data.
+	//
+	//   Perhaps more importantly, storing digest texts directly makes the database dumps much easier to read for humans without
+	//   having to do extra steps to decode the integers into digest values (either by running sqlite commands with joins, or mentally).
+	//
+	items := []struct{ itemName, command string }{
+		{
+			"DigestUncompressedPairs",
+			`CREATE TABLE IF NOT EXISTS DigestUncompressedPairs(` +
+				// index implied by PRIMARY KEY
+				`anyDigest		TEXT PRIMARY KEY NOT NULL,` +
+				// DigestUncompressedPairs_index_uncompressedDigest
+				`uncompressedDigest	TEXT NOT NULL
+			)`,
+		},
+		{
+			"DigestUncompressedPairs_index_uncompressedDigest",
+			`CREATE INDEX IF NOT EXISTS DigestUncompressedPairs_index_uncompressedDigest ON DigestUncompressedPairs(uncompressedDigest)`,
+		},
+		{
+			"DigestCompressors",
+			`CREATE TABLE IF NOT EXISTS DigestCompressors(` +
+				// index implied by PRIMARY KEY
+				`digest		TEXT PRIMARY KEY NOT NULL,` +
+				// May include blobinfocache.Uncompressed (not blobinfocache.UnknownCompression).
+				`compressor	TEXT NOT NULL
+			)`,
+		},
+		{
+			"KnownLocations",
+			`CREATE TABLE IF NOT EXISTS KnownLocations(
+				transport	TEXT NOT NULL,
+				scope		TEXT NOT NULL,
+				digest		TEXT NOT NULL,
+				location	TEXT NOT NULL,` +
+				// TIMESTAMP is parsed by SQLite with NUMERIC affinity, but go-sqlite3 stores text in the (Go formatting semantics)
+				// format "2006-01-02 15:04:05.999999999-07:00".
+				// See also the _loc option in the sql.Open data source name.
+				`time		TIMESTAMP NOT NULL,` +
+				// Implies an index.
+				// We also search by (transport, scope, digest), that doesn’t need an extra index
+				// because it is a prefix of the implied primary-key index.
+				`PRIMARY KEY (transport, scope, digest, location)
+			)`,
+		},
+		{
+			"DigestTOCUncompressedPairs",
+			`CREATE TABLE IF NOT EXISTS DigestTOCUncompressedPairs(` +
+				// index implied by PRIMARY KEY
+				`tocDigest		TEXT PRIMARY KEY NOT NULL,` +
+				`uncompressedDigest	TEXT NOT NULL
+			)`,
+		},
+		{
+			"DigestSpecificVariantCompressors", // If changing the schema incompatibly, merge this with DigestCompressors.
+			`CREATE TABLE IF NOT EXISTS DigestSpecificVariantCompressors(` +
+				// index implied by PRIMARY KEY
+				`digest	TEXT PRIMARY KEY NOT NULL,` +
+				// The compressor is not `UnknownCompression`.
+				`specificVariantCompressor	TEXT NOT NULL,
+				specificVariantAnnotations	BLOB NOT NULL
+			)`,
+		},
+	}
+
+	_, err := dbTransaction(db, func(tx *sql.Tx) (void, error) {
+		// If the last-created item exists, assume nothing needs to be done.
+		lastItemName := items[len(items)-1].itemName
+		_, found, err := querySingleValue[int](tx, "SELECT 1 FROM sqlite_schema WHERE name=?", lastItemName)
+		if err != nil {
+			return void{}, fmt.Errorf("checking if SQLite schema item %q exists: %w", lastItemName, err)
+		}
+		if !found {
+			// Item does not exist, assuming a fresh database.
+			for _, i := range items {
+				if _, err := tx.Exec(i.command); err != nil {
+					return void{}, fmt.Errorf("creating item %s: %w", i.itemName, err)
+				}
+			}
+		}
+		return void{}, nil
+	})
+	return err
+}
+
+// uncompressedDigest implements types.BlobInfoCache.UncompressedDigest within a transaction.
+func (sqc *cache) uncompressedDigest(tx *sql.Tx, anyDigest digest.Digest) (digest.Digest, error) {
+	uncompressedString, found, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestUncompressedPairs WHERE anyDigest = ?", anyDigest.String())
+	if err != nil {
+		return "", err
+	}
+	if found {
+		d, err := digest.Parse(uncompressedString)
+		if err != nil {
+			return "", err
+		}
+		return d, nil
+	}
+	// A record as uncompressedDigest implies that anyDigest must already refer to an uncompressed digest.
+	// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
+	// when we already record a (compressed, uncompressed) pair.
+	_, found, err = querySingleValue[int](tx, "SELECT 1 FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", anyDigest.String())
+	if err != nil {
+		return "", err
+	}
+	if found {
+		return anyDigest, nil
+	}
+	return "", nil
+}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (sqc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+	res, err := transaction(sqc, func(tx *sql.Tx) (digest.Digest, error) {
+		return sqc.uncompressedDigest(tx, anyDigest)
+	})
+	if err != nil {
+		return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
+	}
+	return res
+}
+
+// RecordDigestUncompressedPair records that uncompressed is the uncompressed version of anyDigest.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (sqc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+	_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
+		previousString, gotPrevious, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestUncompressedPairs WHERE anyDigest = ?", anyDigest.String())
+		if err != nil {
+			return void{}, fmt.Errorf("looking for uncompressed digest for %q", anyDigest)
+		}
+		if gotPrevious {
+			previous, err := digest.Parse(previousString)
+			if err != nil {
+				return void{}, err
+			}
+			if previous != uncompressed {
+				logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
+			}
+		}
+		if _, err := tx.Exec("INSERT OR REPLACE INTO DigestUncompressedPairs(anyDigest, uncompressedDigest) VALUES (?, ?)",
+			anyDigest.String(), uncompressed.String()); err != nil {
+			return void{}, fmt.Errorf("recording uncompressed digest %q for %q: %w", uncompressed, anyDigest, err)
+		}
+		return void{}, nil
+	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// UncompressedDigestForTOC returns an uncompressed digest corresponding to tocDigest.
+// Returns "" if the uncompressed digest is unknown. +func (sqc *cache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest { + res, err := transaction(sqc, func(tx *sql.Tx) (digest.Digest, error) { + uncompressedString, found, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestTOCUncompressedPairs WHERE tocDigest = ?", tocDigest.String()) + if err != nil { + return "", err + } + if found { + d, err := digest.Parse(uncompressedString) + if err != nil { + return "", err + } + return d, nil + + } + return "", nil + }) + if err != nil { + return "" // FIXME? Log err (but throttle the log volume on repeated accesses)? + } + return res +} + +// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed. +// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. +// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. +// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) +func (sqc *cache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) { + _, _ = transaction(sqc, func(tx *sql.Tx) (void, error) { + previousString, gotPrevious, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestTOCUncompressedPairs WHERE tocDigest = ?", tocDigest.String()) + if err != nil { + return void{}, fmt.Errorf("looking for uncompressed digest for blob with TOC %q", tocDigest) + } + if gotPrevious { + previous, err := digest.Parse(previousString) + if err != nil { + return void{}, err + } + if previous != uncompressed { + logrus.Warnf("Uncompressed digest for blob with TOC %q previously recorded as %q, now %q", tocDigest, previous, uncompressed) + } + } + if _, err := tx.Exec("INSERT OR REPLACE INTO DigestTOCUncompressedPairs(tocDigest, uncompressedDigest) VALUES (?, ?)", + tocDigest.String(), uncompressed.String()); err != nil { + return void{}, fmt.Errorf("recording uncompressed digest %q for blob with TOC %q: %w", uncompressed, tocDigest, err) + } + return void{}, nil + }) // FIXME? Log error (but throttle the log volume on repeated accesses)? +} + +// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, +// and can be reused given the opaque location data. +func (sqc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, location types.BICLocationReference) { + _, _ = transaction(sqc, func(tx *sql.Tx) (void, error) { + if _, err := tx.Exec("INSERT OR REPLACE INTO KnownLocations(transport, scope, digest, location, time) VALUES (?, ?, ?, ?, ?)", + transport.Name(), scope.Opaque, digest.String(), location.Opaque, time.Now()); err != nil { // Possibly overwriting an older entry. + return void{}, fmt.Errorf("recording known location %q for (%q, %q, %q): %w", + location.Opaque, transport.Name(), scope.Opaque, digest.String(), err) + } + return void{}, nil + }) // FIXME? Log error (but throttle the log volume on repeated accesses)? +} + +// RecordDigestCompressorData records data for the blob with the specified digest. +// WARNING: Only call this with LOCALLY VERIFIED data: +// - don’t record a compressor for a digest just because some remote author claims so +// (e.g. 
because a manifest says so); +// - don’t record the non-base variant or annotations if we are not _sure_ that the base variant +// and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them +// in a manifest) +// +// otherwise the cache could be poisoned and cause us to make incorrect edits to type +// information in a manifest. +func (sqc *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobinfocache.DigestCompressorData) { + _, _ = transaction(sqc, func(tx *sql.Tx) (void, error) { + previous, gotPrevious, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", anyDigest.String()) + if err != nil { + return void{}, fmt.Errorf("looking for compressor of %q", anyDigest) + } + warned := false + if gotPrevious && previous != data.BaseVariantCompressor { + logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous, data.BaseVariantCompressor) + warned = true + } + if data.BaseVariantCompressor == blobinfocache.UnknownCompression { + if _, err := tx.Exec("DELETE FROM DigestCompressors WHERE digest = ?", anyDigest.String()); err != nil { + return void{}, fmt.Errorf("deleting compressor for digest %q: %w", anyDigest, err) + } + if _, err := tx.Exec("DELETE FROM DigestSpecificVariantCompressors WHERE digest = ?", anyDigest.String()); err != nil { + return void{}, fmt.Errorf("deleting specific variant compressor for digest %q: %w", anyDigest, err) + } + } else { + if _, err := tx.Exec("INSERT OR REPLACE INTO DigestCompressors(digest, compressor) VALUES (?, ?)", + anyDigest.String(), data.BaseVariantCompressor); err != nil { + return void{}, fmt.Errorf("recording compressor %q for %q: %w", data.BaseVariantCompressor, anyDigest, err) + } + } + + if data.SpecificVariantCompressor != blobinfocache.UnknownCompression { + if !warned { // Don’t warn twice about the same digest + prevSVC, found, err := querySingleValue[string](tx, "SELECT specificVariantCompressor FROM DigestSpecificVariantCompressors WHERE digest = ?", anyDigest.String()) + if err != nil { + return void{}, fmt.Errorf("looking for specific variant compressor of %q", anyDigest) + } + if found && data.SpecificVariantCompressor != prevSVC { + logrus.Warnf("Specific compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, prevSVC, data.SpecificVariantCompressor) + } + } + annotations, err := json.Marshal(data.SpecificVariantAnnotations) + if err != nil { + return void{}, err + } + if _, err := tx.Exec("INSERT OR REPLACE INTO DigestSpecificVariantCompressors(digest, specificVariantCompressor, specificVariantAnnotations) VALUES (?, ?, ?)", + anyDigest.String(), data.SpecificVariantCompressor, annotations); err != nil { + return void{}, fmt.Errorf("recording specific variant compressor %q/%q for %q: %w", data.SpecificVariantCompressor, annotations, anyDigest, err) + } + } + return void{}, nil + }) // FIXME? Log error (but throttle the log volume on repeated accesses)? +} + +// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), +// and returns the result of appending them to candidates. +// v2Options is not nil if the caller is CandidateLocations2: this allows including candidates with unknown location, and filters out candidates +// with unknown compression. 
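+//
+// (Note on the query below: because of the LEFT JOIN, a digest with only a base
+// variant recorded still yields a row, with NULL specific-variant columns; that
+// is why specificVariantCompressor is scanned into a sql.NullString.)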
+func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, + v2Options *blobinfocache.CandidateLocations2Options) ([]prioritize.CandidateWithTime, error) { + compressionData := blobinfocache.DigestCompressorData{ + BaseVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, + } + if v2Options != nil { + var baseVariantCompressor string + var specificVariantCompressor sql.NullString + var annotationBytes []byte + switch err := tx.QueryRow("SELECT compressor, specificVariantCompressor, specificVariantAnnotations "+ + "FROM DigestCompressors LEFT JOIN DigestSpecificVariantCompressors USING (digest) WHERE digest = ?", digest.String()). + Scan(&baseVariantCompressor, &specificVariantCompressor, &annotationBytes); { + case errors.Is(err, sql.ErrNoRows): // Do nothing + case err != nil: + return nil, fmt.Errorf("scanning compressor data: %w", err) + default: + compressionData.BaseVariantCompressor = baseVariantCompressor + if specificVariantCompressor.Valid && annotationBytes != nil { + compressionData.SpecificVariantCompressor = specificVariantCompressor.String + if err := json.Unmarshal(annotationBytes, &compressionData.SpecificVariantAnnotations); err != nil { + return nil, err + } + } + } + } + template := prioritize.CandidateTemplateWithCompression(v2Options, digest, compressionData) + if template == nil { + return candidates, nil + } + + rows, err := tx.Query("SELECT location, time FROM KnownLocations "+ + "WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?", + transport.Name(), scope.Opaque, digest.String()) + if err != nil { + return nil, fmt.Errorf("looking up candidate locations: %w", err) + } + defer rows.Close() + + rowAdded := false + for rows.Next() { + var location string + var time time.Time + if err := rows.Scan(&location, &time); err != nil { + return nil, fmt.Errorf("scanning candidate: %w", err) + } + candidates = append(candidates, template.CandidateWithLocation(types.BICLocationReference{Opaque: location}, time)) + rowAdded = true + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("iterating through locations: %w", err) + } + + if !rowAdded && v2Options != nil { + candidates = append(candidates, template.CandidateWithUnknownLocation()) + } + return candidates, nil +} + +// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) +// that could possibly be reused within the specified (transport scope) (if they still +// exist, which is not guaranteed). +func (sqc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 { + return sqc.candidateLocations(transport, scope, digest, options.CanSubstitute, &options) +} + +// candidateLocations implements CandidateLocations / CandidateLocations2. +// v2Options is not nil if the caller is CandidateLocations2. 
+func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool, + v2Options *blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 { + var uncompressedDigest digest.Digest // = "" + res, err := transaction(sqc, func(tx *sql.Tx) ([]prioritize.CandidateWithTime, error) { + res := []prioritize.CandidateWithTime{} + res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, v2Options) + if err != nil { + return nil, err + } + if canSubstitute { + uncompressedDigest, err = sqc.uncompressedDigest(tx, primaryDigest) + if err != nil { + return nil, err + } + if uncompressedDigest != "" { + // FIXME? We could integrate this with appendReplacementCandidates into a single join instead of N+1 queries. + // (In the extreme, we could turn _everything_ this function does into a single query. + // And going even further, even DestructivelyPrioritizeReplacementCandidates could be turned into SQL.) + // For now, we prioritize simplicity, and sharing both code and implementation structure with the other cache implementations. + rows, err := tx.Query("SELECT anyDigest FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", uncompressedDigest.String()) + if err != nil { + return nil, fmt.Errorf("querying for other digests: %w", err) + } + defer rows.Close() + for rows.Next() { + var otherDigestString string + if err := rows.Scan(&otherDigestString); err != nil { + return nil, fmt.Errorf("scanning other digest: %w", err) + } + otherDigest, err := digest.Parse(otherDigestString) + if err != nil { + return nil, err + } + if otherDigest != primaryDigest && otherDigest != uncompressedDigest { + res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Options) + if err != nil { + return nil, err + } + } + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("iterating through other digests: %w", err) + } + + if uncompressedDigest != primaryDigest { + res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Options) + if err != nil { + return nil, err + } + } + } + } + return res, nil + }) + if err != nil { + return []blobinfocache.BICReplacementCandidate2{} // FIXME? Log err (but throttle the log volume on repeated accesses)? + } + return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest) + +} + +// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused +// within the specified (transport scope) (if they still exist, which is not guaranteed). +// +// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, +// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same +// uncompressed digest. 
+func (sqc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { + return blobinfocache.CandidateLocationsFromV2(sqc.candidateLocations(transport, scope, digest, canSubstitute, nil)) +} diff --git a/vendor/github.com/containers/image/v5/pkg/compression/compression.go b/vendor/github.com/containers/image/v5/pkg/compression/compression.go new file mode 100644 index 0000000000..782c86d068 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/compression/compression.go @@ -0,0 +1,175 @@ +package compression + +import ( + "bytes" + "compress/bzip2" + "fmt" + "io" + + "github.com/containers/image/v5/pkg/compression/internal" + "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/storage/pkg/chunked/compressor" + "github.com/klauspost/pgzip" + "github.com/sirupsen/logrus" + "github.com/ulikunitz/xz" +) + +// Algorithm is a compression algorithm that can be used for CompressStream. +type Algorithm = types.Algorithm + +var ( + // Gzip compression. + Gzip = internal.NewAlgorithm(types.GzipAlgorithmName, "", + []byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor) + // Bzip2 compression. + Bzip2 = internal.NewAlgorithm(types.Bzip2AlgorithmName, "", + []byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor) + // Xz compression. + Xz = internal.NewAlgorithm(types.XzAlgorithmName, "", + []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor) + // Zstd compression. + Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, "", + []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor) + // ZstdChunked is a Zstd compression with chunk metadata which allows random access to individual files. + ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName, + nil, ZstdDecompressor, compressor.ZstdCompressor) + + compressionAlgorithms = map[string]Algorithm{ + Gzip.Name(): Gzip, + Bzip2.Name(): Bzip2, + Xz.Name(): Xz, + Zstd.Name(): Zstd, + ZstdChunked.Name(): ZstdChunked, + } +) + +// AlgorithmByName returns the compressor by its name +func AlgorithmByName(name string) (Algorithm, error) { + algorithm, ok := compressionAlgorithms[name] + if ok { + return algorithm, nil + } + return Algorithm{}, fmt.Errorf("cannot find compressor for %q", name) +} + +// DecompressorFunc returns the decompressed stream, given a compressed stream. +// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!). +type DecompressorFunc = internal.DecompressorFunc + +// GzipDecompressor is a DecompressorFunc for the gzip compression algorithm. +func GzipDecompressor(r io.Reader) (io.ReadCloser, error) { + return pgzip.NewReader(r) +} + +// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm. +func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) { + return io.NopCloser(bzip2.NewReader(r)), nil +} + +// XzDecompressor is a DecompressorFunc for the xz compression algorithm. +func XzDecompressor(r io.Reader) (io.ReadCloser, error) { + r, err := xz.NewReader(r) + if err != nil { + return nil, err + } + return io.NopCloser(r), nil +} + +// gzipCompressor is a CompressorFunc for the gzip compression algorithm. 
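To make the registry above concrete, here is a hedged sketch of compressing a stream through this file's public API (CompressStream and the gzipCompressor implementation follow below). The literal name "gzip" is assumed to be the value of types.GzipAlgorithmName:

    func compressGzip(dst io.Writer, src io.Reader) error {
    	algo, err := compression.AlgorithmByName("gzip")
    	if err != nil {
    		return err
    	}
    	w, err := compression.CompressStream(dst, algo, nil) // nil selects the default level
    	if err != nil {
    		return err
    	}
    	if _, err := io.Copy(w, src); err != nil {
    		w.Close()
    		return err
    	}
    	return w.Close() // Close flushes buffered compressed data
    }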
+func gzipCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) { + if level != nil { + return pgzip.NewWriterLevel(r, *level) + } + return pgzip.NewWriter(r), nil +} + +// bzip2Compressor is a CompressorFunc for the bzip2 compression algorithm. +func bzip2Compressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) { + return nil, fmt.Errorf("bzip2 compression not supported") +} + +// xzCompressor is a CompressorFunc for the xz compression algorithm. +func xzCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) { + return xz.NewWriter(r) +} + +// CompressStream returns the compressor by its name +func CompressStream(dest io.Writer, algo Algorithm, level *int) (io.WriteCloser, error) { + m := map[string]string{} + return internal.AlgorithmCompressor(algo)(dest, m, level) +} + +// CompressStreamWithMetadata returns the compressor by its name. +// +// Compressing a stream may create integrity data that allows consuming the compressed byte stream +// while only using subsets of the compressed data (if the compressed data is seekable and most +// of the uncompressed data is already present via other means), while still protecting integrity +// of the compressed stream against unwanted modification. (In OCI container images, this metadata +// is usually carried in manifest annotations.) +// +// Such a partial decompression is not implemented by this package; it is consumed e.g. by +// github.com/containers/storage/pkg/chunked . +// +// If the compression generates such metadata, it is written to the provided metadata map. +func CompressStreamWithMetadata(dest io.Writer, metadata map[string]string, algo Algorithm, level *int) (io.WriteCloser, error) { + return internal.AlgorithmCompressor(algo)(dest, metadata, level) +} + +// DetectCompressionFormat returns an Algorithm and DecompressorFunc if the input is recognized as a compressed format, an invalid +// value and nil otherwise. +// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning. +func DetectCompressionFormat(input io.Reader) (Algorithm, DecompressorFunc, io.Reader, error) { + buffer := [8]byte{} + + n, err := io.ReadAtLeast(input, buffer[:], len(buffer)) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + // This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again. + // Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later. + return Algorithm{}, nil, nil, err + } + + var retAlgo Algorithm + var decompressor DecompressorFunc + for _, algo := range compressionAlgorithms { + prefix := internal.AlgorithmPrefix(algo) + if len(prefix) > 0 && bytes.HasPrefix(buffer[:n], prefix) { + logrus.Debugf("Detected compression format %s", algo.Name()) + retAlgo = algo + decompressor = internal.AlgorithmDecompressor(algo) + break + } + } + if decompressor == nil { + logrus.Debugf("No compression detected") + } + + return retAlgo, decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil +} + +// DetectCompression returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise. +// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning. 
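Because DetectCompressionFormat consumes up to the first 8 bytes of its input, callers must switch to the reader it returns; a short sketch under that constraint:

    func sniff(input io.Reader) (io.Reader, error) {
    	format, decompressor, reader, err := compression.DetectCompressionFormat(input)
    	if err != nil {
    		return nil, err
    	}
    	if decompressor != nil {
    		logrus.Debugf("stream is %s-compressed", format.Name())
    	}
    	return reader, nil // all further reads must use reader, never input
    }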
+func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) { + _, d, r, e := DetectCompressionFormat(input) + return d, r, e +} + +// AutoDecompress takes a stream and returns an uncompressed version of the +// same stream. +// The caller must call Close() on the returned stream (even if the input does not need, +// or does not even support, closing!). +func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) { + decompressor, stream, err := DetectCompression(stream) + if err != nil { + return nil, false, fmt.Errorf("detecting compression: %w", err) + } + var res io.ReadCloser + if decompressor != nil { + res, err = decompressor(stream) + if err != nil { + return nil, false, fmt.Errorf("initializing decompression: %w", err) + } + } else { + res = io.NopCloser(stream) + } + return res, decompressor != nil, nil +} diff --git a/vendor/github.com/containers/image/v5/pkg/compression/zstd.go b/vendor/github.com/containers/image/v5/pkg/compression/zstd.go new file mode 100644 index 0000000000..39ae014d2e --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/compression/zstd.go @@ -0,0 +1,59 @@ +package compression + +import ( + "io" + + "github.com/klauspost/compress/zstd" +) + +type wrapperZstdDecoder struct { + decoder *zstd.Decoder +} + +func (w *wrapperZstdDecoder) Close() error { + w.decoder.Close() + return nil +} + +func (w *wrapperZstdDecoder) DecodeAll(input, dst []byte) ([]byte, error) { + return w.decoder.DecodeAll(input, dst) +} + +func (w *wrapperZstdDecoder) Read(p []byte) (int, error) { + return w.decoder.Read(p) +} + +func (w *wrapperZstdDecoder) Reset(r io.Reader) error { + return w.decoder.Reset(r) +} + +func (w *wrapperZstdDecoder) WriteTo(wr io.Writer) (int64, error) { + return w.decoder.WriteTo(wr) +} + +func zstdReader(buf io.Reader) (io.ReadCloser, error) { + decoder, err := zstd.NewReader(buf) + return &wrapperZstdDecoder{decoder: decoder}, err +} + +func zstdWriter(dest io.Writer) (io.WriteCloser, error) { + return zstd.NewWriter(dest) +} + +func zstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) { + el := zstd.EncoderLevelFromZstd(level) + return zstd.NewWriter(dest, zstd.WithEncoderLevel(el)) +} + +// zstdCompressor is a CompressorFunc for the zstd compression algorithm. +func zstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) { + if level == nil { + return zstdWriter(r) + } + return zstdWriterWithLevel(r, *level) +} + +// ZstdDecompressor is a DecompressorFunc for the zstd compression algorithm. +func ZstdDecompressor(r io.Reader) (io.ReadCloser, error) { + return zstdReader(r) +} diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go index da2238a0b6..243b13c88a 100644 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go @@ -6,6 +6,8 @@ import ( "errors" "fmt" "io/fs" + "iter" + "maps" "os" "os/exec" "path/filepath" @@ -93,9 +95,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // Credential helpers in the auth file have a // direct mapping to a registry, so we can just // walk the map. 
- for registry := range fileContents.CredHelpers { - allKeys.Add(registry) - } + allKeys.AddSeq(maps.Keys(fileContents.CredHelpers)) for key := range fileContents.AuthConfigs { key := normalizeAuthFileKey(key, path.legacyFormat) if key == normalizedDockerIORegistry { @@ -115,16 +115,14 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon return nil, err } } - for registry := range creds { - allKeys.Add(registry) - } + allKeys.AddSeq(maps.Keys(creds)) } } // Now use `GetCredentials` to the specific auth configs for each // previously listed registry. allCreds := make(map[string]types.DockerAuthConfig) - for _, key := range allKeys.Values() { + for key := range allKeys.All() { creds, err := GetCredentials(sys, key) if err != nil { // Note: we rely on the logging in `GetCredentials`. @@ -818,16 +816,10 @@ func findCredentialsInFile(key, registry string, path authPath) (types.DockerAut // Support sub-registry namespaces in auth. // (This is not a feature of ~/.docker/config.json; we support it even for // those files as an extension.) - var keys []string - if !path.legacyFormat { - keys = authKeysForKey(key) - } else { - keys = []string{registry} - } - + // // Repo or namespace keys are only supported as exact matches. For registry // keys we prefer exact matches as well. - for _, key := range keys { + for key := range authKeyLookupOrder(key, registry, path.legacyFormat) { if val, exists := fileContents.AuthConfigs[key]; exists { return decodeDockerAuth(path.path, key, val) } @@ -854,25 +846,33 @@ func findCredentialsInFile(key, registry string, path authPath) (types.DockerAut return types.DockerAuthConfig{}, nil } -// authKeysForKey returns the keys matching a provided auth file key, in order -// from the best match to worst. For example, +// authKeyLookupOrder returns a sequence for lookup keys matching (key or registry) +// in file with legacyFormat, in order from the best match to worst. 
+// For example, in a non-legacy file, // when given a repository key "quay.io/repo/ns/image", it returns // - quay.io/repo/ns/image // - quay.io/repo/ns // - quay.io/repo // - quay.io -func authKeysForKey(key string) (res []string) { - for { - res = append(res, key) +func authKeyLookupOrder(key, registry string, legacyFormat bool) iter.Seq[string] { + return func(yield func(string) bool) { + if legacyFormat { + _ = yield(registry) // We stop in any case + return + } + + for { + if !yield(key) { + return + } - lastSlash := strings.LastIndex(key, "/") - if lastSlash == -1 { - break + lastSlash := strings.LastIndex(key, "/") + if lastSlash == -1 { + break + } + key = key[:lastSlash] } - key = key[:lastSlash] } - - return res } // decodeDockerAuth decodes the username and password from conf, diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go index 07fe502942..c9e8ac5cbd 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go @@ -1,5 +1,4 @@ //go:build !freebsd -// +build !freebsd package sysregistriesv2 diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go index 741b99f8f7..7dada4b779 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go @@ -1,5 +1,4 @@ //go:build freebsd -// +build freebsd package sysregistriesv2 diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go index 71f5bc8378..677629c5db 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go @@ -134,7 +134,7 @@ func ResolveShortNameAlias(ctx *types.SystemContext, name string) (reference.Nam // editShortNameAlias loads the aliases.conf file and changes it. If value is // set, it adds the name-value pair as a new alias. Otherwise, it will remove // name from the config. 
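authKeyLookupOrder above returns an iter.Seq[string], so callers consume it with Go 1.23 range-over-func semantics and can stop at the first hit, exactly as findCredentialsInFile does earlier in this hunk; fileContents and path here stand in for that caller's locals:

    for key := range authKeyLookupOrder("quay.io/repo/ns/image", "quay.io", false) {
    	if val, exists := fileContents.AuthConfigs[key]; exists {
    		return decodeDockerAuth(path.path, key, val) // most specific key wins
    	}
    }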
-func editShortNameAlias(ctx *types.SystemContext, name string, value *string) error { +func editShortNameAlias(ctx *types.SystemContext, name string, value *string) (retErr error) { if err := validateShortName(name); err != nil { return err } @@ -178,7 +178,13 @@ func editShortNameAlias(ctx *types.SystemContext, name string, value *string) er if err != nil { return err } - defer f.Close() + // since we are writing to this file, make sure we handle err on Close() + defer func() { + closeErr := f.Close() + if retErr == nil { + retErr = closeErr + } + }() encoder := toml.NewEncoder(f) return encoder.Encode(conf) @@ -229,7 +235,7 @@ func parseShortNameValue(alias string) (reference.Named, error) { } registry := reference.Domain(named) - if !(strings.ContainsAny(registry, ".:") || registry == "localhost") { + if !strings.ContainsAny(registry, ".:") && registry != "localhost" { return nil, fmt.Errorf("invalid alias %q: must contain registry and repository", alias) } diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go index 9ac050512a..318988f054 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go @@ -4,9 +4,11 @@ import ( "errors" "fmt" "io/fs" + "maps" "os" "path/filepath" "reflect" + "slices" "sort" "strings" "sync" @@ -18,7 +20,6 @@ import ( "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/regexp" "github.com/sirupsen/logrus" - "golang.org/x/exp/maps" ) // systemRegistriesConfPath is the path to the system-wide registry @@ -430,7 +431,8 @@ func (config *V2RegistriesConf) postProcessRegistries() error { return fmt.Errorf("pull-from-mirror must not be set for a non-mirror registry %q", reg.Prefix) } // make sure mirrors are valid - for _, mir := range reg.Mirrors { + for j := range reg.Mirrors { + mir := ®.Mirrors[j] mir.Location, err = parseLocation(mir.Location) if err != nil { return err @@ -1040,12 +1042,10 @@ func (c *parsedConfig) updateWithConfigurationFrom(updates *parsedConfig) { } // Go maps have a non-deterministic order when iterating the keys, so - // we dump them in a slice and sort it to enforce some order in - // Registries slice. Some consumers of c/image (e.g., CRI-O) log the - // configuration where a non-deterministic order could easily cause - // confusion. - prefixes := maps.Keys(registryMap) - sort.Strings(prefixes) + // we sort the keys to enforce some order in Registries slice. + // Some consumers of c/image (e.g., CRI-O) log the configuration + // and a non-deterministic order could easily cause confusion. + prefixes := slices.Sorted(maps.Keys(registryMap)) c.partialV2.Registries = []Registry{} for _, prefix := range prefixes { diff --git a/vendor/github.com/containers/image/v5/signature/docker.go b/vendor/github.com/containers/image/v5/signature/docker.go new file mode 100644 index 0000000000..b313231a80 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/docker.go @@ -0,0 +1,102 @@ +// Note: Consider the API unstable until the code supports at least three different image formats or transports. 
+
+package signature
+
+import (
+	"errors"
+	"fmt"
+	"slices"
+	"strings"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/signature/internal"
+	"github.com/opencontainers/go-digest"
+)
+
+// SignOptions includes optional parameters for signing container images.
+type SignOptions struct {
+	// Passphrase to use when signing with the key identity.
+	Passphrase string
+}
+
+// SignDockerManifestWithOptions returns a signature for manifest as the specified dockerReference,
+// using mech and keyIdentity, and the specified options.
+func SignDockerManifestWithOptions(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string, options *SignOptions) ([]byte, error) {
+	manifestDigest, err := manifest.Digest(m)
+	if err != nil {
+		return nil, err
+	}
+	sig := newUntrustedSignature(manifestDigest, dockerReference)
+
+	var passphrase string
+	if options != nil {
+		passphrase = options.Passphrase
+		// The gpgme implementation can’t use passphrase with \n; reject it here for consistent behavior.
+		if strings.Contains(passphrase, "\n") {
+			return nil, errors.New("invalid passphrase: must not contain a line break")
+		}
+	}
+
+	return sig.sign(mech, keyIdentity, passphrase)
+}
+
+// SignDockerManifest returns a signature for manifest as the specified dockerReference,
+// using mech and keyIdentity.
+func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) {
+	return SignDockerManifestWithOptions(m, dockerReference, mech, keyIdentity, nil)
+}
+
+// VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference,
+// using mech.
+func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byte,
+	expectedDockerReference string, mech SigningMechanism, expectedKeyIdentity string) (*Signature, error) {
+	sig, _, err := VerifyImageManifestSignatureUsingKeyIdentityList(unverifiedSignature, unverifiedManifest, expectedDockerReference, mech, []string{expectedKeyIdentity})
+	return sig, err
+}
+
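A hedged signing sketch tying the entry points above together; mech, keyFingerprint, manifestBytes, and passphrase are assumptions obtained elsewhere:

    sig, err := signature.SignDockerManifestWithOptions(manifestBytes,
    	"docker.io/library/busybox:latest", mech, keyFingerprint,
    	&signature.SignOptions{Passphrase: passphrase}) // the passphrase must not contain "\n"
    if err != nil {
    	return err
    }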
+// VerifyImageManifestSignatureUsingKeyIdentityList checks that unverifiedSignature uses one of the expectedKeyIdentities
+// to sign unverifiedManifest as expectedDockerReference, using mech. Returns the verified signature and the key identity that
+// was used to verify it.
+func VerifyImageManifestSignatureUsingKeyIdentityList(unverifiedSignature, unverifiedManifest []byte,
+	expectedDockerReference string, mech SigningMechanism, expectedKeyIdentities []string) (*Signature, string, error) {
+	expectedRef, err := reference.ParseNormalizedNamed(expectedDockerReference)
+	if err != nil {
+		return nil, "", err
+	}
+	var matchedKeyIdentity string
+	sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{
+		validateKeyIdentity: func(keyIdentity string) error {
+			if !slices.Contains(expectedKeyIdentities, keyIdentity) {
+				return internal.NewInvalidSignatureError(fmt.Sprintf("Signature by %s does not match expected fingerprints %v", keyIdentity, expectedKeyIdentities))
+			}
+			matchedKeyIdentity = keyIdentity
+			return nil
+		},
+		validateSignedDockerReference: func(signedDockerReference string) error {
+			signedRef, err := reference.ParseNormalizedNamed(signedDockerReference)
+			if err != nil {
+				return internal.NewInvalidSignatureError(fmt.Sprintf("Invalid docker reference %q in signature", signedDockerReference))
+			}
+			if signedRef.String() != expectedRef.String() {
+				return internal.NewInvalidSignatureError(fmt.Sprintf("Docker reference %q does not match %q",
+					signedDockerReference, expectedDockerReference))
+			}
+			return nil
+		},
+		validateSignedDockerManifestDigest: func(signedDockerManifestDigest digest.Digest) error {
+			matches, err := manifest.MatchesDigest(unverifiedManifest, signedDockerManifestDigest)
+			if err != nil {
+				return err
+			}
+			if !matches {
+				return internal.NewInvalidSignatureError(fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest))
+			}
+			return nil
+		},
+	})
+	if err != nil {
+		return nil, "", err
+	}
+	return sig, matchedKeyIdentity, err
+}
diff --git a/vendor/github.com/containers/image/v5/signature/fulcio_cert.go b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go
new file mode 100644
index 0000000000..cce4677486
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go
@@ -0,0 +1,212 @@
+//go:build !containers_image_fulcio_stub
+
+package signature
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/x509"
+	"encoding/asn1"
+	"errors"
+	"fmt"
+	"slices"
+	"time"
+
+	"github.com/containers/image/v5/signature/internal"
+	"github.com/sigstore/fulcio/pkg/certificate"
+	"github.com/sigstore/sigstore/pkg/cryptoutils"
+)
+
+// fulcioTrustRoot contains the policy for validating Fulcio-issued certificates.
+// Users should call validate() on the policy before using it.
+type fulcioTrustRoot struct {
+	caCertificates *x509.CertPool
+	oidcIssuer     string
+	subjectEmail   string
+}
+
+func (f *fulcioTrustRoot) validate() error {
+	if f.oidcIssuer == "" {
+		return errors.New("Internal inconsistency: Fulcio use set up without OIDC issuer")
+	}
+	if f.subjectEmail == "" {
+		return errors.New("Internal inconsistency: Fulcio use set up without subject email")
+	}
+	return nil
+}
+
+// fulcioIssuerInCertificate returns the OIDC issuer recorded by Fulcio in untrustedCertificate;
+// it fails if the extension is not present in the certificate, or on any inconsistency.
+func fulcioIssuerInCertificate(untrustedCertificate *x509.Certificate) (string, error) {
+	// == Validate the recorded OIDC issuer
+	gotOIDCIssuer1 := false
+	gotOIDCIssuer2 := false
+	var oidcIssuer1, oidcIssuer2 string
+	// certificate.ParseExtensions doesn’t reject duplicate extensions, and doesn’t detect inconsistencies
+	// between certificate.OIDIssuer and certificate.OIDIssuerV2.
+ // Go 1.19 rejects duplicate extensions universally; but until we can require Go 1.19, + // reject duplicates manually. + for _, untrustedExt := range untrustedCertificate.Extensions { + if untrustedExt.Id.Equal(certificate.OIDIssuer) { //nolint:staticcheck // This is deprecated, but we must continue to accept it. + if gotOIDCIssuer1 { + // Coverage: This is unreachable in Go ≥1.19, which rejects certificates with duplicate extensions + // already in ParseCertificate. + return "", internal.NewInvalidSignatureError("Fulcio certificate has a duplicate OIDC issuer v1 extension") + } + oidcIssuer1 = string(untrustedExt.Value) + gotOIDCIssuer1 = true + } else if untrustedExt.Id.Equal(certificate.OIDIssuerV2) { + if gotOIDCIssuer2 { + // Coverage: This is unreachable in Go ≥1.19, which rejects certificates with duplicate extensions + // already in ParseCertificate. + return "", internal.NewInvalidSignatureError("Fulcio certificate has a duplicate OIDC issuer v2 extension") + } + rest, err := asn1.Unmarshal(untrustedExt.Value, &oidcIssuer2) + if err != nil { + return "", internal.NewInvalidSignatureError(fmt.Sprintf("invalid ASN.1 in OIDC issuer v2 extension: %v", err)) + } + if len(rest) != 0 { + return "", internal.NewInvalidSignatureError("invalid ASN.1 in OIDC issuer v2 extension, trailing data") + } + gotOIDCIssuer2 = true + } + } + switch { + case gotOIDCIssuer1 && gotOIDCIssuer2: + if oidcIssuer1 != oidcIssuer2 { + return "", internal.NewInvalidSignatureError(fmt.Sprintf("inconsistent OIDC issuer extension values: v1 %#v, v2 %#v", + oidcIssuer1, oidcIssuer2)) + } + return oidcIssuer1, nil + case gotOIDCIssuer1: + return oidcIssuer1, nil + case gotOIDCIssuer2: + return oidcIssuer2, nil + default: + return "", internal.NewInvalidSignatureError("Fulcio certificate is missing the issuer extension") + } +} + +func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time, untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte) (crypto.PublicKey, error) { + // == Verify the certificate is correctly signed + var untrustedIntermediatePool *x509.CertPool // = nil + // untrustedCertificateChainPool.AppendCertsFromPEM does something broadly similar, + // but it seems to optimize for memory usage at the cost of larger CPU usage (i.e. to load + // the hundreds of trusted CAs). Golang’s TLS code similarly calls individual AddCert + // for intermediate certificates. + if len(untrustedIntermediateChainBytes) > 0 { + untrustedIntermediateChain, err := cryptoutils.UnmarshalCertificatesFromPEM(untrustedIntermediateChainBytes) + if err != nil { + return nil, internal.NewInvalidSignatureError(fmt.Sprintf("loading certificate chain: %v", err)) + } + untrustedIntermediatePool = x509.NewCertPool() + if len(untrustedIntermediateChain) > 1 { + for _, untrustedIntermediateCert := range untrustedIntermediateChain[:len(untrustedIntermediateChain)-1] { + untrustedIntermediatePool.AddCert(untrustedIntermediateCert) + } + } + } + + untrustedCertificate, err := parseLeafCertFromPEM(untrustedCertificateBytes) + if err != nil { + return nil, err + } + + // Go rejects Subject Alternative Name that has no DNSNames, EmailAddresses, IPAddresses and URIs; + // we match SAN ourselves, so override that. 
+	if len(untrustedCertificate.UnhandledCriticalExtensions) > 0 {
+		var remaining []asn1.ObjectIdentifier
+		for _, oid := range untrustedCertificate.UnhandledCriticalExtensions {
+			if !oid.Equal(cryptoutils.SANOID) {
+				remaining = append(remaining, oid)
+			}
+		}
+		untrustedCertificate.UnhandledCriticalExtensions = remaining
+	}
+
+	if _, err := untrustedCertificate.Verify(x509.VerifyOptions{
+		Intermediates: untrustedIntermediatePool,
+		Roots:         f.caCertificates,
+		// NOTE: Cosign uses untrustedCertificate.NotBefore here (i.e. uses _that_ time for intermediate certificate validation),
+		// and validates the leaf certificate against relevantTime manually.
+		// We verify the full certificate chain against relevantTime instead.
+		// Assuming the certificate is fulcio-generated and very short-lived, that should make little difference.
+		CurrentTime: relevantTime,
+		KeyUsages:   []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning},
+	}); err != nil {
+		return nil, internal.NewInvalidSignatureError(fmt.Sprintf("verifying leaf certificate failed: %v", err))
+	}
+
+	// Cosign verifies a SCT of the certificate (either embedded, or even, probably irrelevant, externally-supplied).
+	//
+	// We don’t currently do that.
+	//
+	// At the very least, with Fulcio we require Rekor SETs to prove Rekor contains a log of the signature, and that
+	// already contains the full certificate; so a SCT of the certificate is superfluous (assuming Rekor allowed searching by
+	// certificate subject, which, well…). That argument might go away if we add support for RFC 3161 timestamps instead of Rekor.
+	//
+	// Secondarily, assuming a trusted Fulcio server (which, to be fair, might not be the case for the public one) SCT is not clearly
+	// better than the Fulcio server maintaining an audit log; a SCT can only reveal a misissuance if there is some other authoritative
+	// log of approved Fulcio invocations, and it’s not clear where that would come from, especially human users manually
+	// logging in using OpenID are not going to maintain a record of those actions.
+	//
+	// Also, the SCT does not help reveal _what_ was maliciously signed, nor does it protect against malicious signatures
+	// by correctly-issued certificates.
+	//
+	// So, pragmatically, the ideal design seems to be to only do signatures from a trusted build system (which is, by definition,
+	// the arbiter of desired vs. malicious signatures) that maintains an audit log of performed signature operations; and that seems to
+	// make the SCT (and all of Rekor apart from the trusted timestamp) unnecessary.
+
+	// == Validate the recorded OIDC issuer
+	oidcIssuer, err := fulcioIssuerInCertificate(untrustedCertificate)
+	if err != nil {
+		return nil, err
+	}
+	if oidcIssuer != f.oidcIssuer {
+		return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Unexpected Fulcio OIDC issuer %q", oidcIssuer))
+	}
+
+	// == Validate the OIDC subject
+	if !slices.Contains(untrustedCertificate.EmailAddresses, f.subjectEmail) {
+		return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Required email %q not found (got %q)",
+			f.subjectEmail,
+			untrustedCertificate.EmailAddresses))
+	}
+	// FIXME: Match more subject types? Cosign does:
+	// - .DNSNames (can’t be issued by Fulcio)
+	// - .IPAddresses (can’t be issued by Fulcio)
+	// - .URIs (CAN be issued by Fulcio)
+	// - OtherName values in SAN (CAN be issued by Fulcio)
+	// - Various values about GitHub workflows (CAN be issued by Fulcio)
+	// What does it… mean to get an OAuth2 identity for an IP address?
+ // FIXME: How far into Turing-completeness for the issuer/subject do we need to get? Simultaneously accepted alternatives, for + // issuers and/or subjects and/or combinations? Regexps? More? + + return untrustedCertificate.PublicKey, nil +} + +func parseLeafCertFromPEM(untrustedCertificateBytes []byte) (*x509.Certificate, error) { + untrustedLeafCerts, err := cryptoutils.UnmarshalCertificatesFromPEM(untrustedCertificateBytes) + if err != nil { + return nil, internal.NewInvalidSignatureError(fmt.Sprintf("parsing leaf certificate: %v", err)) + } + switch len(untrustedLeafCerts) { + case 0: + return nil, internal.NewInvalidSignatureError("no certificate found in signature certificate data") + case 1: // OK + return untrustedLeafCerts[0], nil + default: + return nil, internal.NewInvalidSignatureError("unexpected multiple certificates present in signature certificate data") + } +} + +func verifyRekorFulcio(rekorPublicKeys []*ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte, + untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string, + untrustedPayloadBytes []byte) (crypto.PublicKey, error) { + rekorSETTime, err := internal.VerifyRekorSET(rekorPublicKeys, untrustedRekorSET, untrustedCertificateBytes, + untrustedBase64Signature, untrustedPayloadBytes) + if err != nil { + return nil, err + } + return fulcioTrustRoot.verifyFulcioCertificateAtTime(rekorSETTime, untrustedCertificateBytes, untrustedIntermediateChainBytes) +} diff --git a/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go b/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go new file mode 100644 index 0000000000..da8e13c1df --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go @@ -0,0 +1,27 @@ +//go:build containers_image_fulcio_stub + +package signature + +import ( + "crypto" + "crypto/ecdsa" + "crypto/x509" + "errors" +) + +type fulcioTrustRoot struct { + caCertificates *x509.CertPool + oidcIssuer string + subjectEmail string +} + +func (f *fulcioTrustRoot) validate() error { + return errors.New("fulcio disabled at compile-time") +} + +func verifyRekorFulcio(rekorPublicKeys []*ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte, + untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string, + untrustedPayloadBytes []byte) (crypto.PublicKey, error) { + return nil, errors.New("fulcio disabled at compile-time") + +} diff --git a/vendor/github.com/containers/image/v5/signature/internal/errors.go b/vendor/github.com/containers/image/v5/signature/internal/errors.go new file mode 100644 index 0000000000..d21e8544d4 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/internal/errors.go @@ -0,0 +1,24 @@ +package internal + +// InvalidSignatureError is returned when parsing an invalid signature. +// This is publicly visible as signature.InvalidSignatureError +type InvalidSignatureError struct { + msg string +} + +func (err InvalidSignatureError) Error() string { + return err.msg +} + +func NewInvalidSignatureError(msg string) InvalidSignatureError { + return InvalidSignatureError{msg: msg} +} + +// JSONFormatToInvalidSignatureError converts JSONFormatError to InvalidSignatureError. +// All other errors are returned as is. 
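Since InvalidSignatureError (exported as signature.InvalidSignatureError, per the comment above) is a plain value type, callers would typically match it with errors.As; a sketch of a hypothetical consumer:

    var invalidSig signature.InvalidSignatureError
    if errors.As(err, &invalidSig) {
    	// the signature was structurally or cryptographically unacceptable; do not retry
    }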
+func JSONFormatToInvalidSignatureError(err error) error { + if formatErr, ok := err.(JSONFormatError); ok { + err = NewInvalidSignatureError(formatErr.Error()) + } + return err +} diff --git a/vendor/github.com/containers/image/v5/signature/internal/json.go b/vendor/github.com/containers/image/v5/signature/internal/json.go new file mode 100644 index 0000000000..f9efafb8e1 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/internal/json.go @@ -0,0 +1,90 @@ +package internal + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + + "github.com/containers/image/v5/internal/set" +) + +// JSONFormatError is returned when JSON does not match expected format. +type JSONFormatError string + +func (err JSONFormatError) Error() string { + return string(err) +} + +// ParanoidUnmarshalJSONObject unmarshals data as a JSON object, but failing on the slightest unexpected aspect +// (including duplicated keys, unrecognized keys, and non-matching types). Uses fieldResolver to +// determine the destination for a field value, which should return a pointer to the destination if valid, or nil if the key is rejected. +// +// The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy, +// we could use reflection to automate this. Later? +func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) any) error { + seenKeys := set.New[string]() + + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return JSONFormatError(err.Error()) + } + if t != json.Delim('{') { + return JSONFormatError(fmt.Sprintf("JSON object expected, got %#v", t)) + } + for { + t, err := dec.Token() + if err != nil { + return JSONFormatError(err.Error()) + } + if t == json.Delim('}') { + break + } + + key, ok := t.(string) + if !ok { + // Coverage: This should never happen, dec.Token() rejects non-string-literals in this state. + return JSONFormatError(fmt.Sprintf("Key string literal expected, got %#v", t)) + } + if seenKeys.Contains(key) { + return JSONFormatError(fmt.Sprintf("Duplicate key %q", key)) + } + seenKeys.Add(key) + + valuePtr := fieldResolver(key) + if valuePtr == nil { + return JSONFormatError(fmt.Sprintf("Unknown key %q", key)) + } + // This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value. + if err := dec.Decode(valuePtr); err != nil { + return JSONFormatError(err.Error()) + } + } + if _, err := dec.Token(); err != io.EOF { + return JSONFormatError("Unexpected data after JSON object") + } + return nil +} + +// ParanoidUnmarshalJSONObjectExactFields unmarshals data as a JSON object, but failing on the slightest unexpected aspect +// (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields +// must be present exactly once, and none other fields are accepted. 
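A usage sketch for ParanoidUnmarshalJSONObjectExactFields (defined below), mirroring how the rekor_set.go decoders later in this diff call it; any duplicate key, unknown key, missing key, type mismatch, or trailing data yields a JSONFormatError:

    var timestamp []byte
    var payload json.RawMessage
    err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
    	"SignedEntryTimestamp": &timestamp,
    	"Payload":              &payload,
    })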
+func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]any) error { + seenKeys := set.New[string]() + if err := ParanoidUnmarshalJSONObject(data, func(key string) any { + if valuePtr, ok := exactFields[key]; ok { + seenKeys.Add(key) + return valuePtr + } + return nil + }); err != nil { + return err + } + for key := range exactFields { + if !seenKeys.Contains(key) { + return JSONFormatError(fmt.Sprintf(`Key %q missing in a JSON object`, key)) + } + } + return nil +} diff --git a/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go new file mode 100644 index 0000000000..f26a978701 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go @@ -0,0 +1,238 @@ +//go:build !containers_image_rekor_stub + +package internal + +import ( + "bytes" + "crypto/ecdsa" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/pem" + "fmt" + "time" + + "github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer" + "github.com/sigstore/rekor/pkg/generated/models" +) + +// This is the github.com/sigstore/rekor/pkg/generated/models.Hashedrekord.APIVersion for github.com/sigstore/rekor/pkg/generated/models.HashedrekordV001Schema. +// We could alternatively use github.com/sigstore/rekor/pkg/types/hashedrekord.APIVERSION, but that subpackage adds too many dependencies. +const HashedRekordV001APIVersion = "0.0.1" + +// UntrustedRekorSET is a parsed content of the sigstore-signature Rekor SET +// (note that this a signature-specific format, not a format directly used by the Rekor API). +// This corresponds to github.com/sigstore/cosign/bundle.RekorBundle, but we impose a stricter decoder. +type UntrustedRekorSET struct { + UntrustedSignedEntryTimestamp []byte // A signature over some canonical JSON form of UntrustedPayload + UntrustedPayload json.RawMessage +} + +type UntrustedRekorPayload struct { + Body []byte // In cosign, this is an any, but only a string works + IntegratedTime int64 + LogIndex int64 + LogID string +} + +// A compile-time check that UntrustedRekorSET implements json.Unmarshaler +var _ json.Unmarshaler = (*UntrustedRekorSET)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface +func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error { + return JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data)) +} + +// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type. +// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller. +func (s *UntrustedRekorSET) strictUnmarshalJSON(data []byte) error { + return ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "SignedEntryTimestamp": &s.UntrustedSignedEntryTimestamp, + "Payload": &s.UntrustedPayload, + }) +} + +// A compile-time check that UntrustedRekorSET and *UntrustedRekorSET implements json.Marshaler +var _ json.Marshaler = UntrustedRekorSET{} +var _ json.Marshaler = (*UntrustedRekorSET)(nil) + +// MarshalJSON implements the json.Marshaler interface. 
+func (s UntrustedRekorSET) MarshalJSON() ([]byte, error) {
+	return json.Marshal(map[string]any{
+		"SignedEntryTimestamp": s.UntrustedSignedEntryTimestamp,
+		"Payload":              s.UntrustedPayload,
+	})
+}
+
+// A compile-time check that UntrustedRekorPayload implements json.Unmarshaler
+var _ json.Unmarshaler = (*UntrustedRekorPayload)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (p *UntrustedRekorPayload) UnmarshalJSON(data []byte) error {
+	return JSONFormatToInvalidSignatureError(p.strictUnmarshalJSON(data))
+}
+
+// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
+// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller.
+func (p *UntrustedRekorPayload) strictUnmarshalJSON(data []byte) error {
+	return ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+		"body":           &p.Body,
+		"integratedTime": &p.IntegratedTime,
+		"logIndex":       &p.LogIndex,
+		"logID":          &p.LogID,
+	})
+}
+
+// A compile-time check that UntrustedRekorPayload and *UntrustedRekorPayload implements json.Marshaler
+var _ json.Marshaler = UntrustedRekorPayload{}
+var _ json.Marshaler = (*UntrustedRekorPayload)(nil)
+
+// MarshalJSON implements the json.Marshaler interface.
+func (p UntrustedRekorPayload) MarshalJSON() ([]byte, error) {
+	return json.Marshal(map[string]any{
+		"body":           p.Body,
+		"integratedTime": p.IntegratedTime,
+		"logIndex":       p.LogIndex,
+		"logID":          p.LogID,
+	})
+}
+
+// VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by publicKey and matches the rest of the data.
+// Returns bundle upload time on success.
+func VerifyRekorSET(publicKeys []*ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) {
+	// FIXME: Should the publicKey parameter hard-code ecdsa?
+
+	// == Parse SET bytes
+	var untrustedSET UntrustedRekorSET
+	// Sadly, we need to parse and transform untrusted data before verifying a cryptographic signature...
+	if err := json.Unmarshal(unverifiedRekorSET, &untrustedSET); err != nil {
+		return time.Time{}, NewInvalidSignatureError(err.Error())
+	}
+	// == Verify SET signature
+	// Cosign unmarshals and re-marshals UntrustedPayload; that seems unnecessary,
+	// assuming jsoncanonicalizer is designed to operate on untrusted data.
+	untrustedSETPayloadCanonicalBytes, err := jsoncanonicalizer.Transform(untrustedSET.UntrustedPayload)
+	if err != nil {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("canonicalizing Rekor SET JSON: %v", err))
+	}
+	untrustedSETPayloadHash := sha256.Sum256(untrustedSETPayloadCanonicalBytes)
+	publicKeymatched := false
+	for _, pk := range publicKeys {
+		if ecdsa.VerifyASN1(pk, untrustedSETPayloadHash[:], untrustedSET.UntrustedSignedEntryTimestamp) {
+			publicKeymatched = true
+			break
+		}
+	}
+	if !publicKeymatched {
+		return time.Time{}, NewInvalidSignatureError("cryptographic signature verification of Rekor SET failed")
+	}
+
+	// == Parse SET payload
+	// Parse the cryptographically-verified canonicalized variant, NOT the originally-delivered representation,
+	// to decrease risk of exploiting the JSON parser. Note that if there were an arbitrary execution vulnerability, the attacker
+	// could have exploited the parsing of unverifiedRekorSET above already; so this, at best, ensures more consistent processing
+	// of the SET payload.
+	var rekorPayload UntrustedRekorPayload
+	if err := json.Unmarshal(untrustedSETPayloadCanonicalBytes, &rekorPayload); err != nil {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("parsing Rekor SET payload: %v", err.Error()))
+	}
+	// FIXME: Use a different decoder implementation? The Swagger-generated code is kinda ridiculous, with the need to re-marshal
+	// hashedRekor.Spec and so on.
+	// Especially if we anticipate needing to decode different data formats…
+	// That would also allow being much more strict about JSON.
+	//
+	// Alternatively, rely on the existing .Validate() methods instead of manually checking for nil all over the place.
+	var hashedRekord models.Hashedrekord
+	if err := json.Unmarshal(rekorPayload.Body, &hashedRekord); err != nil {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("decoding the body of a Rekor SET payload: %v", err))
+	}
+	// The decode of models.HashedRekord validates the "kind": "hashedrekord" field, which is otherwise invisible to us.
+	if hashedRekord.APIVersion == nil {
+		return time.Time{}, NewInvalidSignatureError("missing Rekor SET Payload API version")
+	}
+	if *hashedRekord.APIVersion != HashedRekordV001APIVersion {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("unsupported Rekor SET Payload hashedrekord version %#v", hashedRekord.APIVersion))
+	}
+	hashedRekordV001Bytes, err := json.Marshal(hashedRekord.Spec)
+	if err != nil {
+		// Coverage: hashedRekord.Spec is an any that was just unmarshaled,
+		// so this should never fail.
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("re-creating hashedrekord spec: %v", err))
+	}
+	var hashedRekordV001 models.HashedrekordV001Schema
+	if err := json.Unmarshal(hashedRekordV001Bytes, &hashedRekordV001); err != nil {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("decoding hashedrekord spec: %v", err))
+	}
+
+	// == Match unverifiedKeyOrCertBytes
+	if hashedRekordV001.Signature == nil {
+		return time.Time{}, NewInvalidSignatureError(`Missing "signature" field in hashedrekord`)
+	}
+	if hashedRekordV001.Signature.PublicKey == nil {
+		return time.Time{}, NewInvalidSignatureError(`Missing "signature.publicKey" field in hashedrekord`)
+	}
+	rekorKeyOrCertPEM, rest := pem.Decode(hashedRekordV001.Signature.PublicKey.Content)
+	if rekorKeyOrCertPEM == nil {
+		return time.Time{}, NewInvalidSignatureError("publicKey in Rekor SET is not in PEM format")
+	}
+	if len(rest) != 0 {
+		return time.Time{}, NewInvalidSignatureError("publicKey in Rekor SET has trailing data")
+	}
+	// FIXME: For public keys, let the caller provide the DER-formatted blob instead
+	// of round-tripping through PEM.
+	unverifiedKeyOrCertPEM, rest := pem.Decode(unverifiedKeyOrCertBytes)
+	if unverifiedKeyOrCertPEM == nil {
+		return time.Time{}, NewInvalidSignatureError("public key or cert to be matched against publicKey in Rekor SET is not in PEM format")
+	}
+	if len(rest) != 0 {
+		return time.Time{}, NewInvalidSignatureError("public key or cert to be matched against publicKey in Rekor SET has trailing data")
+	}
+	// NOTE: This compares the PEM payload, but not the object type or headers.
+	if !bytes.Equal(rekorKeyOrCertPEM.Bytes, unverifiedKeyOrCertPEM.Bytes) {
+		return time.Time{}, NewInvalidSignatureError("publicKey in Rekor SET does not match")
+	}
+	// == Match unverifiedSignatureBytes
+	unverifiedSignatureBytes, err := base64.StdEncoding.DecodeString(unverifiedBase64Signature)
+	if err != nil {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("decoding signature base64: %v", err))
+	}
+	if !bytes.Equal(hashedRekordV001.Signature.Content, unverifiedSignatureBytes) {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("signature in Rekor SET does not match: %#v vs. %#v",
+			string(hashedRekordV001.Signature.Content), string(unverifiedSignatureBytes)))
+	}
+
+	// == Match unverifiedPayloadBytes
+	if hashedRekordV001.Data == nil {
+		return time.Time{}, NewInvalidSignatureError(`Missing "data" field in hashedrekord`)
+	}
+	if hashedRekordV001.Data.Hash == nil {
+		return time.Time{}, NewInvalidSignatureError(`Missing "data.hash" field in hashedrekord`)
+	}
+	if hashedRekordV001.Data.Hash.Algorithm == nil {
+		return time.Time{}, NewInvalidSignatureError(`Missing "data.hash.algorithm" field in hashedrekord`)
+	}
+	// FIXME: Rekor 1.3.5 has added SHA-384 and SHA-512 as recognized values.
+	// Eventually we should support them as well.
+	// Short-term, Cosign (as of 2024-02 and Cosign 2.2.3) only produces and accepts SHA-256, so right now that’s not a compatibility
+	// issue.
+	if *hashedRekordV001.Data.Hash.Algorithm != models.HashedrekordV001SchemaDataHashAlgorithmSha256 {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf(`Unexpected "data.hash.algorithm" value %#v`, *hashedRekordV001.Data.Hash.Algorithm))
+	}
+	if hashedRekordV001.Data.Hash.Value == nil {
+		return time.Time{}, NewInvalidSignatureError(`Missing "data.hash.value" field in hashedrekord`)
+	}
+	rekorPayloadHash, err := hex.DecodeString(*hashedRekordV001.Data.Hash.Value)
+	if err != nil {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf(`Invalid "data.hash.value" field in hashedrekord: %v`, err))
+	}
+	unverifiedPayloadHash := sha256.Sum256(unverifiedPayloadBytes)
+	if !bytes.Equal(rekorPayloadHash, unverifiedPayloadHash[:]) {
+		return time.Time{}, NewInvalidSignatureError("payload in Rekor SET does not match")
+	}
+
+	// == All OK; return the relevant time.
+	return time.Unix(rekorPayload.IntegratedTime, 0), nil
+}
diff --git a/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go b/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go
new file mode 100644
index 0000000000..4ff3da7edb
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go
@@ -0,0 +1,14 @@
+//go:build containers_image_rekor_stub
+
+package internal
+
+import (
+	"crypto/ecdsa"
+	"time"
+)
+
+// VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by publicKey and matches the rest of the data.
+// Returns bundle upload time on success.
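The _stub.go files in this package follow one pattern: the real implementation is guarded by a !containers_image_*_stub build constraint, and the failing stub that follows compiles in its place when the corresponding tag is set, e.g.:

    go build -tags "containers_image_rekor_stub,containers_image_fulcio_stub" ./...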
+func VerifyRekorSET(publicKeys []*ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) { + return time.Time{}, NewInvalidSignatureError("rekor disabled at compile-time") +} diff --git a/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go new file mode 100644 index 0000000000..90a81dc1c4 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go @@ -0,0 +1,239 @@ +package internal + +import ( + "bytes" + "crypto" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + "github.com/containers/image/v5/version" + digest "github.com/opencontainers/go-digest" + sigstoreSignature "github.com/sigstore/sigstore/pkg/signature" +) + +const ( + sigstoreSignatureType = "cosign container image signature" + sigstoreHarcodedHashAlgorithm = crypto.SHA256 +) + +// UntrustedSigstorePayload is a parsed content of a sigstore signature payload (not the full signature) +type UntrustedSigstorePayload struct { + untrustedDockerManifestDigest digest.Digest + untrustedDockerReference string // FIXME: more precise type? + untrustedCreatorID *string + // This is intentionally an int64; the native JSON float64 type would allow to represent _some_ sub-second precision, + // but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds). + // So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually, + // we would add another field, UntrustedTimestampNS int64. + untrustedTimestamp *int64 +} + +// NewUntrustedSigstorePayload returns an UntrustedSigstorePayload object with +// the specified primary contents and appropriate metadata. +func NewUntrustedSigstorePayload(dockerManifestDigest digest.Digest, dockerReference string) UntrustedSigstorePayload { + // Use intermediate variables for these values so that we can take their addresses. + // Golang guarantees that they will have a new address on every execution. + creatorID := "containers/image " + version.Version + timestamp := time.Now().Unix() + return UntrustedSigstorePayload{ + untrustedDockerManifestDigest: dockerManifestDigest, + untrustedDockerReference: dockerReference, + untrustedCreatorID: &creatorID, + untrustedTimestamp: ×tamp, + } +} + +// A compile-time check that UntrustedSigstorePayload and *UntrustedSigstorePayload implements json.Marshaler +var _ json.Marshaler = UntrustedSigstorePayload{} +var _ json.Marshaler = (*UntrustedSigstorePayload)(nil) + +// MarshalJSON implements the json.Marshaler interface. 
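For orientation, the MarshalJSON defined below emits a payload of the following shape (all values illustrative):

    const examplePayload = `{
      "critical": {
        "type": "cosign container image signature",
        "image": {"docker-manifest-digest": "sha256:…"},
        "identity": {"docker-reference": "docker.io/library/busybox"}
      },
      "optional": {
        "creator": "containers/image …",
        "timestamp": 1700000000
      }
    }`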
+func (s UntrustedSigstorePayload) MarshalJSON() ([]byte, error) { + if s.untrustedDockerManifestDigest == "" || s.untrustedDockerReference == "" { + return nil, errors.New("Unexpected empty signature content") + } + critical := map[string]any{ + "type": sigstoreSignatureType, + "image": map[string]string{"docker-manifest-digest": s.untrustedDockerManifestDigest.String()}, + "identity": map[string]string{"docker-reference": s.untrustedDockerReference}, + } + optional := map[string]any{} + if s.untrustedCreatorID != nil { + optional["creator"] = *s.untrustedCreatorID + } + if s.untrustedTimestamp != nil { + optional["timestamp"] = *s.untrustedTimestamp + } + signature := map[string]any{ + "critical": critical, + "optional": optional, + } + return json.Marshal(signature) +} + +// Compile-time check that UntrustedSigstorePayload implements json.Unmarshaler +var _ json.Unmarshaler = (*UntrustedSigstorePayload)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface +func (s *UntrustedSigstorePayload) UnmarshalJSON(data []byte) error { + return JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data)) +} + +// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type. +// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller. +func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error { + var critical, optional json.RawMessage + if err := ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "critical": &critical, + "optional": &optional, + }); err != nil { + return err + } + + var creatorID string + var timestamp float64 + var gotCreatorID, gotTimestamp = false, false + // /usr/bin/cosign generates "optional": null if there are no user-specified annotations. + if !bytes.Equal(optional, []byte("null")) { + if err := ParanoidUnmarshalJSONObject(optional, func(key string) any { + switch key { + case "creator": + gotCreatorID = true + return &creatorID + case "timestamp": + gotTimestamp = true + return ×tamp + default: + var ignore any + return &ignore + } + }); err != nil { + return err + } + } + if gotCreatorID { + s.untrustedCreatorID = &creatorID + } + if gotTimestamp { + intTimestamp := int64(timestamp) + if float64(intTimestamp) != timestamp { + return NewInvalidSignatureError("Field optional.timestamp is not an integer") + } + s.untrustedTimestamp = &intTimestamp + } + + var t string + var image, identity json.RawMessage + if err := ParanoidUnmarshalJSONObjectExactFields(critical, map[string]any{ + "type": &t, + "image": &image, + "identity": &identity, + }); err != nil { + return err + } + if t != sigstoreSignatureType { + return NewInvalidSignatureError(fmt.Sprintf("Unrecognized signature type %s", t)) + } + + var digestString string + if err := ParanoidUnmarshalJSONObjectExactFields(image, map[string]any{ + "docker-manifest-digest": &digestString, + }); err != nil { + return err + } + digestValue, err := digest.Parse(digestString) + if err != nil { + return NewInvalidSignatureError(fmt.Sprintf(`invalid docker-manifest-digest value %q: %v`, digestString, err)) + } + s.untrustedDockerManifestDigest = digestValue + + return ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{ + "docker-reference": &s.untrustedDockerReference, + }) +} + +// SigstorePayloadAcceptanceRules specifies how to decide whether an untrusted payload is acceptable. 
+// We centralize the actual parsing and data extraction in VerifySigstorePayload; this supplies +// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature +// because the functions have the same or similar types, so there is a risk of exchanging the functions; +// named members of this struct are more explicit. +type SigstorePayloadAcceptanceRules struct { + ValidateSignedDockerReference func(string) error + ValidateSignedDockerManifestDigest func(digest.Digest) error +} + +// verifySigstorePayloadBlobSignature verifies unverifiedSignature of unverifiedPayload was correctly created +// by any of the public keys in publicKeys. +// +// This is an internal implementation detail of VerifySigstorePayload and should have no other callers. +// It is INSUFFICIENT alone to consider the signature acceptable. +func verifySigstorePayloadBlobSignature(publicKeys []crypto.PublicKey, unverifiedPayload, unverifiedSignature []byte) error { + if len(publicKeys) == 0 { + return errors.New("Need at least one public key to verify the sigstore payload, but got 0") + } + + verifiers := make([]sigstoreSignature.Verifier, 0, len(publicKeys)) + for _, key := range publicKeys { + // Failing to load a verifier indicates that something is really, really + // invalid about the public key; prefer to fail even if the signature might be + // valid with other keys, so that users fix their fallback keys before they need them. + // For that reason, we even initialize all verifiers before trying to validate the signature + // with any key. + verifier, err := sigstoreSignature.LoadVerifier(key, sigstoreHarcodedHashAlgorithm) + if err != nil { + return err + } + verifiers = append(verifiers, verifier) + } + + var failures []string + for _, verifier := range verifiers { + // github.com/sigstore/cosign/pkg/cosign.verifyOCISignature uses signatureoptions.WithContext(), + // which seems to be not used by anything. So we don’t bother. + err := verifier.VerifySignature(bytes.NewReader(unverifiedSignature), bytes.NewReader(unverifiedPayload)) + if err == nil { + return nil + } + + failures = append(failures, err.Error()) + } + + if len(failures) == 0 { + // Coverage: We have checked there is at least one public key, any success causes an early return, + // and any failure adds an entry to failures => there must be at least one error. + return fmt.Errorf("Internal error: signature verification failed but no errors have been recorded") + } + return NewInvalidSignatureError("cryptographic signature verification failed: " + strings.Join(failures, ", ")) +} + +// VerifySigstorePayload verifies unverifiedBase64Signature of unverifiedPayload was correctly created by any of the public keys in publicKeys, and that its principal components +// match expected values, both as specified by rules, and returns it. +// We return an *UntrustedSigstorePayload, although nothing actually uses it, +// just to double-check against stupid typos. 
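Before the implementation that follows, a hedged sketch of how a caller inside containers/image might supply the two acceptance callbacks, pinning the payload to an expected reference and digest. The expected values and the example package name are assumptions.

package example // hypothetical, inside the containers/image module

import (
	"crypto"
	"fmt"

	"github.com/containers/image/v5/signature/internal"
	digest "github.com/opencontainers/go-digest"
)

// verifyOneSignature checks one sigstore signature blob against pinned expectations.
func verifyOneSignature(publicKeys []crypto.PublicKey, payloadBytes []byte, base64Sig string,
	expectedRef string, expectedDigest digest.Digest) error {
	rules := internal.SigstorePayloadAcceptanceRules{
		ValidateSignedDockerReference: func(ref string) error {
			if ref != expectedRef {
				return internal.NewInvalidSignatureError(fmt.Sprintf("Docker reference %q does not match %q", ref, expectedRef))
			}
			return nil
		},
		ValidateSignedDockerManifestDigest: func(d digest.Digest) error {
			if d != expectedDigest {
				return internal.NewInvalidSignatureError(fmt.Sprintf("Docker manifest digest %q does not match %q", d, expectedDigest))
			}
			return nil
		},
	}
	_, err := internal.VerifySigstorePayload(publicKeys, payloadBytes, base64Sig, rules)
	return err
}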
+func VerifySigstorePayload(publicKeys []crypto.PublicKey, unverifiedPayload []byte, unverifiedBase64Signature string, rules SigstorePayloadAcceptanceRules) (*UntrustedSigstorePayload, error) { + unverifiedSignature, err := base64.StdEncoding.DecodeString(unverifiedBase64Signature) + if err != nil { + return nil, NewInvalidSignatureError(fmt.Sprintf("base64 decoding: %v", err)) + } + + if err := verifySigstorePayloadBlobSignature(publicKeys, unverifiedPayload, unverifiedSignature); err != nil { + return nil, err + } + + var unmatchedPayload UntrustedSigstorePayload + if err := json.Unmarshal(unverifiedPayload, &unmatchedPayload); err != nil { + return nil, NewInvalidSignatureError(err.Error()) + } + if err := rules.ValidateSignedDockerManifestDigest(unmatchedPayload.untrustedDockerManifestDigest); err != nil { + return nil, err + } + if err := rules.ValidateSignedDockerReference(unmatchedPayload.untrustedDockerReference); err != nil { + return nil, err + } + // SigstorePayloadAcceptanceRules have accepted this value. + return &unmatchedPayload, nil +} diff --git a/vendor/github.com/containers/image/v5/signature/mechanism.go b/vendor/github.com/containers/image/v5/signature/mechanism.go new file mode 100644 index 0000000000..1d3fe0fdc9 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/mechanism.go @@ -0,0 +1,97 @@ +// Note: Consider the API unstable until the code supports at least three different image formats or transports. + +package signature + +import ( + "bytes" + "errors" + "fmt" + "io" + "strings" + + // This code is used only to parse the data in an explicitly-untrusted + // code path, where cryptography is not relevant. For now, continue to + // use this frozen deprecated implementation. When mechanism_openpgp.go + // migrates to another implementation, this should migrate as well. + //lint:ignore SA1019 See above + "golang.org/x/crypto/openpgp" //nolint:staticcheck +) + +// SigningMechanism abstracts a way to sign binary blobs and verify their signatures. +// Each mechanism should eventually be closed by calling Close(). +type SigningMechanism interface { + // Close removes resources associated with the mechanism, if any. + Close() error + // SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. + SupportsSigning() error + // Sign creates a (non-detached) signature of input using keyIdentity. + // Fails with a SigningNotSupportedError if the mechanism does not support signing. + Sign(input []byte, keyIdentity string) ([]byte, error) + // Verify parses unverifiedSignature and returns the content and the signer's identity + Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) + // UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, + // along with a short identifier of the key used for signing. + // WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys) + // is NOT the same as a "key identity" used in other calls to this interface, and + // the values may have no recognizable relationship if the public key is not available. + UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) +} + +// signingMechanismWithPassphrase is an internal extension of SigningMechanism. +type signingMechanismWithPassphrase interface { + SigningMechanism + + // Sign creates a (non-detached) signature of input using keyIdentity and passphrase. 
+ // Fails with a SigningNotSupportedError if the mechanism does not support signing. + SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) +} + +// SigningNotSupportedError is returned when trying to sign using a mechanism which does not support that. +type SigningNotSupportedError string + +func (err SigningNotSupportedError) Error() string { + return string(err) +} + +// NewGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism for the user’s default +// GPG configuration ($GNUPGHOME / ~/.gnupg) +// The caller must call .Close() on the returned SigningMechanism. +func NewGPGSigningMechanism() (SigningMechanism, error) { + return newGPGSigningMechanismInDirectory("") +} + +// NewEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which +// recognizes _only_ public keys from the supplied blob, and returns the identities +// of these keys. +// The caller must call .Close() on the returned SigningMechanism. +func NewEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { + return newEphemeralGPGSigningMechanism([][]byte{blob}) +} + +// gpgUntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, +// along with a short identifier of the key used for signing. +// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys) +// is NOT the same as a "key identity" used in other calls to this interface, and +// the values may have no recognizable relationship if the public key is not available. +func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) { + // This uses the Golang-native OpenPGP implementation instead of gpgme because we are not doing any cryptography. + md, err := openpgp.ReadMessage(bytes.NewReader(untrustedSignature), openpgp.EntityList{}, nil, nil) + if err != nil { + return nil, "", err + } + if !md.IsSigned { + return nil, "", errors.New("The input is not a signature") + } + content, err := io.ReadAll(md.UnverifiedBody) + if err != nil { + // Coverage: An error during reading the body can happen only if + // 1) the message is encrypted, which is not our case (and we don’t give ReadMessage the key + // to decrypt the contents anyway), or + // 2) the message is signed AND we give ReadMessage a corresponding public key, which we don’t. + return nil, "", err + } + + // Uppercase the key ID for minimal consistency with the gpgme-returned fingerprints + // (but note that key ID is a suffix of the fingerprint only for V4 keys, not V3)! + return content, strings.ToUpper(fmt.Sprintf("%016X", md.SignedByKeyId)), nil +} diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go new file mode 100644 index 0000000000..8a8c5878a0 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go @@ -0,0 +1,206 @@ +//go:build !containers_image_openpgp + +package signature + +import ( + "bytes" + "errors" + "fmt" + "os" + + "github.com/containers/image/v5/signature/internal" + "github.com/proglottis/gpgme" +) + +// A GPG/OpenPGP signing mechanism, implemented using gpgme. +type gpgmeSigningMechanism struct { + ctx *gpgme.Context + ephemeralDir string // If not "", a directory to be removed on Close() +} + +// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty. 
+// The caller must call .Close() on the returned SigningMechanism. +func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWithPassphrase, error) { + ctx, err := newGPGMEContext(optionalDir) + if err != nil { + return nil, err + } + return &gpgmeSigningMechanism{ + ctx: ctx, + ephemeralDir: "", + }, nil +} + +// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which +// recognizes _only_ public keys from the supplied blobs, and returns the identities +// of these keys. +// The caller must call .Close() on the returned SigningMechanism. +func newEphemeralGPGSigningMechanism(blobs [][]byte) (signingMechanismWithPassphrase, []string, error) { + dir, err := os.MkdirTemp("", "containers-ephemeral-gpg-") + if err != nil { + return nil, nil, err + } + removeDir := true + defer func() { + if removeDir { + os.RemoveAll(dir) + } + }() + ctx, err := newGPGMEContext(dir) + if err != nil { + return nil, nil, err + } + mech := &gpgmeSigningMechanism{ + ctx: ctx, + ephemeralDir: dir, + } + keyIdentities := []string{} + for _, blob := range blobs { + ki, err := mech.importKeysFromBytes(blob) + if err != nil { + return nil, nil, err + } + keyIdentities = append(keyIdentities, ki...) + } + + removeDir = false + return mech, keyIdentities, nil +} + +// newGPGMEContext returns a new *gpgme.Context, using optionalDir if not empty. +func newGPGMEContext(optionalDir string) (*gpgme.Context, error) { + ctx, err := gpgme.New() + if err != nil { + return nil, err + } + if err = ctx.SetProtocol(gpgme.ProtocolOpenPGP); err != nil { + return nil, err + } + if optionalDir != "" { + err := ctx.SetEngineInfo(gpgme.ProtocolOpenPGP, "", optionalDir) + if err != nil { + return nil, err + } + } + ctx.SetArmor(false) + ctx.SetTextMode(false) + return ctx, nil +} + +func (m *gpgmeSigningMechanism) Close() error { + if m.ephemeralDir != "" { + os.RemoveAll(m.ephemeralDir) // Ignore an error, if any + } + return nil +} + +// importKeysFromBytes imports public keys from the supplied blob and returns their identities. +// The blob is assumed to have an appropriate format (the caller is expected to know which one). +// NOTE: This may modify long-term state (e.g. key storage in a directory underlying the mechanism); +// but we do not make this public, it can only be used through newEphemeralGPGSigningMechanism. +func (m *gpgmeSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) { + inputData, err := gpgme.NewDataBytes(blob) + if err != nil { + return nil, err + } + res, err := m.ctx.Import(inputData) + if err != nil { + return nil, err + } + keyIdentities := []string{} + for _, i := range res.Imports { + if i.Result == nil { + keyIdentities = append(keyIdentities, i.Fingerprint) + } + } + return keyIdentities, nil +} + +// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. +func (m *gpgmeSigningMechanism) SupportsSigning() error { + return nil +} + +// Sign creates a (non-detached) signature of input using keyIdentity and passphrase. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. 
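The ephemeral keyring set up above is normally reached through the package-level signature.NewEphemeralGPGSigningMechanism; a verification sketch with placeholder file paths follows. The same calls work under the default gpgme build and under -tags containers_image_openpgp, which selects the pure-Go, verify-only mechanism appearing later in this diff.

package example // hypothetical

import (
	"fmt"
	"os"

	"github.com/containers/image/v5/signature"
)

func verifyWithEphemeralKeyring() error {
	// Both file paths are made-up inputs.
	keyBlob, err := os.ReadFile("/tmp/example-pubkey.gpg")
	if err != nil {
		return err
	}
	mech, keyIdentities, err := signature.NewEphemeralGPGSigningMechanism(keyBlob)
	if err != nil {
		return err
	}
	defer mech.Close()
	fmt.Println("imported key identities:", keyIdentities)

	sig, err := os.ReadFile("/tmp/example.signature")
	if err != nil {
		return err
	}
	contents, keyIdentity, err := mech.Verify(sig) // non-detached signature
	if err != nil {
		return err
	}
	fmt.Printf("signed by %s: %d bytes of content\n", keyIdentity, len(contents))
	return nil
}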
+func (m *gpgmeSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) { + key, err := m.ctx.GetKey(keyIdentity, true) + if err != nil { + return nil, err + } + inputData, err := gpgme.NewDataBytes(input) + if err != nil { + return nil, err + } + var sigBuffer bytes.Buffer + sigData, err := gpgme.NewDataWriter(&sigBuffer) + if err != nil { + return nil, err + } + + if passphrase != "" { + // Callback to write the passphrase to the specified file descriptor. + callback := func(uidHint string, prevWasBad bool, gpgmeFD *os.File) error { + if prevWasBad { + return errors.New("bad passphrase") + } + _, err := gpgmeFD.WriteString(passphrase + "\n") + return err + } + if err := m.ctx.SetCallback(callback); err != nil { + return nil, fmt.Errorf("setting gpgme passphrase callback: %w", err) + } + + // Loopback mode will use the callback instead of prompting the user. + if err := m.ctx.SetPinEntryMode(gpgme.PinEntryLoopback); err != nil { + return nil, fmt.Errorf("setting gpgme pinentry mode: %w", err) + } + } + + if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil { + return nil, err + } + return sigBuffer.Bytes(), nil +} + +// Sign creates a (non-detached) signature of input using keyIdentity. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. +func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { + return m.SignWithPassphrase(input, keyIdentity, "") +} + +// Verify parses unverifiedSignature and returns the content and the signer's identity +func (m *gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) { + signedBuffer := bytes.Buffer{} + signedData, err := gpgme.NewDataWriter(&signedBuffer) + if err != nil { + return nil, "", err + } + unverifiedSignatureData, err := gpgme.NewDataBytes(unverifiedSignature) + if err != nil { + return nil, "", err + } + _, sigs, err := m.ctx.Verify(unverifiedSignatureData, nil, signedData) + if err != nil { + return nil, "", err + } + if len(sigs) != 1 { + return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))) + } + sig := sigs[0] + // This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves + if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage { + // FIXME: Better error reporting eventually + return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Invalid GPG signature: %#v", sig)) + } + return signedBuffer.Bytes(), sig.Fingerprint, nil +} + +// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, +// along with a short identifier of the key used for signing. +// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys) +// is NOT the same as a "key identity" used in other calls to this interface, and +// the values may have no recognizable relationship if the public key is not available. 
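Signing through the same interface might look like the sketch below; the fingerprint is a placeholder, and this path only succeeds in the default gpgme build, since the openpgp build's SupportsSigning returns a SigningNotSupportedError.

package example // hypothetical

import "github.com/containers/image/v5/signature"

func signBlob(payload []byte) ([]byte, error) {
	mech, err := signature.NewGPGSigningMechanism()
	if err != nil {
		return nil, err
	}
	defer mech.Close()
	if err := mech.SupportsSigning(); err != nil {
		return nil, err // SigningNotSupportedError under containers_image_openpgp
	}
	// "0123456789ABCDEF" stands in for a real key fingerprint.
	return mech.Sign(payload, "0123456789ABCDEF")
}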
+func (m *gpgmeSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) { + return gpgUntrustedSignatureContents(untrustedSignature) +} diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go new file mode 100644 index 0000000000..fea8590e1b --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go @@ -0,0 +1,179 @@ +//go:build containers_image_openpgp + +package signature + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "path" + "strings" + "time" + + "github.com/containers/image/v5/signature/internal" + "github.com/containers/storage/pkg/homedir" + + // This is a fallback code; the primary recommendation is to use the gpgme mechanism + // implementation, which is out-of-process and more appropriate for handling long-term private key material + // than any Go implementation. + // For this verify-only fallback, we haven't reviewed any of the + // existing alternatives to choose; so, for now, continue to + // use this frozen deprecated implementation. + //lint:ignore SA1019 See above + "golang.org/x/crypto/openpgp" //nolint:staticcheck +) + +// A GPG/OpenPGP signing mechanism, implemented using x/crypto/openpgp. +type openpgpSigningMechanism struct { + keyring openpgp.EntityList +} + +// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty. +// The caller must call .Close() on the returned SigningMechanism. +func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWithPassphrase, error) { + m := &openpgpSigningMechanism{ + keyring: openpgp.EntityList{}, + } + + gpgHome := optionalDir + if gpgHome == "" { + gpgHome = os.Getenv("GNUPGHOME") + if gpgHome == "" { + gpgHome = path.Join(homedir.Get(), ".gnupg") + } + } + + pubring, err := os.ReadFile(path.Join(gpgHome, "pubring.gpg")) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + } else { + _, err := m.importKeysFromBytes(pubring) + if err != nil { + return nil, err + } + } + return m, nil +} + +// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which +// recognizes _only_ public keys from the supplied blob, and returns the identities +// of these keys. +// The caller must call .Close() on the returned SigningMechanism. +func newEphemeralGPGSigningMechanism(blobs [][]byte) (signingMechanismWithPassphrase, []string, error) { + m := &openpgpSigningMechanism{ + keyring: openpgp.EntityList{}, + } + keyIdentities := []string{} + for _, blob := range blobs { + ki, err := m.importKeysFromBytes(blob) + if err != nil { + return nil, nil, err + } + keyIdentities = append(keyIdentities, ki...) + } + + return m, keyIdentities, nil +} + +func (m *openpgpSigningMechanism) Close() error { + return nil +} + +// importKeysFromBytes imports public keys from the supplied blob and returns their identities. +// The blob is assumed to have an appropriate format (the caller is expected to know which one). +func (m *openpgpSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) { + keyring, err := openpgp.ReadKeyRing(bytes.NewReader(blob)) + if err != nil { + k, e2 := openpgp.ReadArmoredKeyRing(bytes.NewReader(blob)) + if e2 != nil { + return nil, err // The original error -- FIXME: is this better? 
+ } + keyring = k + } + + keyIdentities := []string{} + for _, entity := range keyring { + if entity.PrimaryKey == nil { + // Coverage: This should never happen, openpgp.ReadEntity fails with a + // openpgp.errors.StructuralError instead of returning an entity with this + // field set to nil. + continue + } + // Uppercase the fingerprint to be compatible with gpgme + keyIdentities = append(keyIdentities, strings.ToUpper(fmt.Sprintf("%x", entity.PrimaryKey.Fingerprint))) + m.keyring = append(m.keyring, entity) + } + return keyIdentities, nil +} + +// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. +func (m *openpgpSigningMechanism) SupportsSigning() error { + return SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag") +} + +// Sign creates a (non-detached) signature of input using keyIdentity. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. +func (m *openpgpSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) { + return nil, SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag") +} + +// Sign creates a (non-detached) signature of input using keyIdentity. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. +func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { + return m.SignWithPassphrase(input, keyIdentity, "") +} + +// Verify parses unverifiedSignature and returns the content and the signer's identity +func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) { + md, err := openpgp.ReadMessage(bytes.NewReader(unverifiedSignature), m.keyring, nil, nil) + if err != nil { + return nil, "", err + } + if !md.IsSigned { + return nil, "", errors.New("not signed") + } + content, err := io.ReadAll(md.UnverifiedBody) + if err != nil { + // Coverage: md.UnverifiedBody.Read only fails if the body is encrypted + // (and possibly also signed, but it _must_ be encrypted) and the signing + // “modification detection code” detects a mismatch. But in that case, + // we would expect the signature verification to fail as well, and that is checked + // first. Besides, we are not supplying any decryption keys, so we really + // can never reach this “encrypted data MDC mismatch” path. + return nil, "", err + } + if md.SignatureError != nil { + return nil, "", fmt.Errorf("signature error: %v", md.SignatureError) + } + if md.SignedBy == nil { + return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Key not found for key ID %x in signature", md.SignedByKeyId)) + } + if md.Signature != nil { + if md.Signature.SigLifetimeSecs != nil { + expiry := md.Signature.CreationTime.Add(time.Duration(*md.Signature.SigLifetimeSecs) * time.Second) + if time.Now().After(expiry) { + return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Signature expired on %s", expiry)) + } + } + } else if md.SignatureV3 == nil { + // Coverage: If md.SignedBy != nil, the final md.UnverifiedBody.Read() either sets one of md.Signature or md.SignatureV3, + // or sets md.SignatureError. 
+ return nil, "", internal.NewInvalidSignatureError("Unexpected openpgp.MessageDetails: neither Signature nor SignatureV3 is set") + } + + // Uppercase the fingerprint to be compatible with gpgme + return content, strings.ToUpper(fmt.Sprintf("%x", md.SignedBy.PublicKey.Fingerprint)), nil +} + +// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, +// along with a short identifier of the key used for signing. +// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys) +// is NOT the same as a "key identity" used in other calls to this interface, and +// the values may have no recognizable relationship if the public key is not available. +func (m *openpgpSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) { + return gpgUntrustedSignatureContents(untrustedSignature) +} diff --git a/vendor/github.com/containers/image/v5/signature/pki_cert.go b/vendor/github.com/containers/image/v5/signature/pki_cert.go new file mode 100644 index 0000000000..20624540fa --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/pki_cert.go @@ -0,0 +1,74 @@ +package signature + +import ( + "crypto" + "crypto/x509" + "errors" + "fmt" + "slices" + + "github.com/containers/image/v5/signature/internal" + "github.com/sigstore/sigstore/pkg/cryptoutils" +) + +type pkiTrustRoot struct { + caRootsCertificates *x509.CertPool + caIntermediateCertificates *x509.CertPool + subjectEmail string + subjectHostname string +} + +func (p *pkiTrustRoot) validate() error { + if p.subjectEmail == "" && p.subjectHostname == "" { + return errors.New("Internal inconsistency: PKI use set up without subject email or subject hostname") + } + return nil +} + +func verifyPKI(pkiTrustRoot *pkiTrustRoot, untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte) (crypto.PublicKey, error) { + var untrustedIntermediatePool *x509.CertPool + if pkiTrustRoot.caIntermediateCertificates != nil { + untrustedIntermediatePool = pkiTrustRoot.caIntermediateCertificates.Clone() + } else { + untrustedIntermediatePool = x509.NewCertPool() + } + if len(untrustedIntermediateChainBytes) > 0 { + untrustedIntermediateChain, err := cryptoutils.UnmarshalCertificatesFromPEM(untrustedIntermediateChainBytes) + if err != nil { + return nil, internal.NewInvalidSignatureError(fmt.Sprintf("loading certificate chain: %v", err)) + } + if len(untrustedIntermediateChain) > 1 { + for _, untrustedIntermediateCert := range untrustedIntermediateChain[:len(untrustedIntermediateChain)-1] { + untrustedIntermediatePool.AddCert(untrustedIntermediateCert) + } + } + } + + untrustedCertificate, err := parseLeafCertFromPEM(untrustedCertificateBytes) + if err != nil { + return nil, err + } + + if _, err := untrustedCertificate.Verify(x509.VerifyOptions{ + Intermediates: untrustedIntermediatePool, + Roots: pkiTrustRoot.caRootsCertificates, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning}, + }); err != nil { + return nil, internal.NewInvalidSignatureError(fmt.Sprintf("veryfing leaf certificate failed: %v", err)) + } + + if pkiTrustRoot.subjectEmail != "" { + if !slices.Contains(untrustedCertificate.EmailAddresses, pkiTrustRoot.subjectEmail) { + return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Required email %q not found (got %q)", + pkiTrustRoot.subjectEmail, + untrustedCertificate.EmailAddresses)) + } + } + if pkiTrustRoot.subjectHostname != "" { + if err = 
untrustedCertificate.VerifyHostname(pkiTrustRoot.subjectHostname); err != nil { + return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Unexpected subject hostname: %v", err)) + } + } + + return untrustedCertificate.PublicKey, nil +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_config.go b/vendor/github.com/containers/image/v5/signature/policy_config.go new file mode 100644 index 0000000000..8de705c22f --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_config.go @@ -0,0 +1,811 @@ +// policy_config.go handles creation of policy objects, either by parsing JSON +// or by programs building them programmatically. + +// The New* constructors are intended to be a stable API. FIXME: after an independent review. + +// Do not invoke the internals of the JSON marshaling/unmarshaling directly. + +// We can't just blindly call json.Unmarshal because that would silently ignore +// typos, and that would just not do for security policy. + +// FIXME? This is by no means an user-friendly parser: No location information in error messages, no other context. +// But at least it is not worse than blind json.Unmarshal()… + +package signature + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/signature/internal" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/fileutils" + "github.com/containers/storage/pkg/homedir" + "github.com/containers/storage/pkg/regexp" +) + +// systemDefaultPolicyPath is the policy path used for DefaultPolicy(). +// You can override this at build time with +// -ldflags '-X github.com/containers/image/v5/signature.systemDefaultPolicyPath=$your_path' +var systemDefaultPolicyPath = builtinDefaultPolicyPath + +// userPolicyFile is the path to the per user policy path. +var userPolicyFile = filepath.FromSlash(".config/containers/policy.json") + +// InvalidPolicyFormatError is returned when parsing an invalid policy configuration. +type InvalidPolicyFormatError string + +func (err InvalidPolicyFormatError) Error() string { + return string(err) +} + +// DefaultPolicy returns the default policy of the system. +// Most applications should be using this method to get the policy configured +// by the system administrator. +// sys should usually be nil, can be set to override the default. +// NOTE: When this function returns an error, report it to the user and abort. +// DO NOT hard-code fallback policies in your application. +func DefaultPolicy(sys *types.SystemContext) (*Policy, error) { + policyPath, err := defaultPolicyPath(sys) + if err != nil { + return nil, err + } + return NewPolicyFromFile(policyPath) +} + +// defaultPolicyPath returns a path to the relevant policy of the system, or an error if the policy is missing. +func defaultPolicyPath(sys *types.SystemContext) (string, error) { + policyFilePath, err := defaultPolicyPathWithHomeDir(sys, homedir.Get(), systemDefaultPolicyPath) + if err != nil { + return "", err + } + return policyFilePath, nil +} + +// defaultPolicyPathWithHomeDir is an internal implementation detail of defaultPolicyPath, +// it exists only to allow testing it with artificial paths. 
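Putting DefaultPolicy together with the path resolution implemented just below (roughly: an explicit sys.SignaturePolicyPath wins, then the per-user ~/.config/containers/policy.json, then the system path), usage might look like this sketch; the override path, registry scope, and key path are all hypothetical:

package example // hypothetical

import (
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/types"
)

// A minimal policy of the shape this file parses: reject everything by
// default, but require GPG signatures for one hypothetical docker repo.
var examplePolicyJSON = []byte(`{
	"default": [{"type": "reject"}],
	"transports": {
		"docker": {
			"registry.example.com/ns": [
				{"type": "signedBy", "keyType": "GPGKeys", "keyPath": "/etc/pki/example-pubkey.gpg"}
			]
		}
	}
}`)

func loadPolicies() error {
	// DefaultPolicy with an explicit override path (hypothetical).
	sys := &types.SystemContext{SignaturePolicyPath: "/tmp/test-policy.json"}
	if _, err := signature.DefaultPolicy(sys); err != nil {
		return err
	}
	// Or parse a blob directly; NewPolicyFromBytes is defined below.
	_, err := signature.NewPolicyFromBytes(examplePolicyJSON)
	return err
}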
+func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string, systemPolicyPath string) (string, error) { + if sys != nil && sys.SignaturePolicyPath != "" { + return sys.SignaturePolicyPath, nil + } + userPolicyFilePath := filepath.Join(homeDir, userPolicyFile) + if err := fileutils.Exists(userPolicyFilePath); err == nil { + return userPolicyFilePath, nil + } + if sys != nil && sys.RootForImplicitAbsolutePaths != "" { + return filepath.Join(sys.RootForImplicitAbsolutePaths, systemPolicyPath), nil + } + if err := fileutils.Exists(systemPolicyPath); err == nil { + return systemPolicyPath, nil + } + return "", fmt.Errorf("no policy.json file found at any of the following: %q, %q", userPolicyFilePath, systemPolicyPath) +} + +// NewPolicyFromFile returns a policy configured in the specified file. +func NewPolicyFromFile(fileName string) (*Policy, error) { + contents, err := os.ReadFile(fileName) + if err != nil { + return nil, err + } + policy, err := NewPolicyFromBytes(contents) + if err != nil { + return nil, fmt.Errorf("invalid policy in %q: %w", fileName, err) + } + return policy, nil +} + +// NewPolicyFromBytes returns a policy parsed from the specified blob. +// Use this function instead of calling json.Unmarshal directly. +func NewPolicyFromBytes(data []byte) (*Policy, error) { + p := Policy{} + if err := json.Unmarshal(data, &p); err != nil { + return nil, InvalidPolicyFormatError(err.Error()) + } + return &p, nil +} + +// Compile-time check that Policy implements json.Unmarshaler. +var _ json.Unmarshaler = (*Policy)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (p *Policy) UnmarshalJSON(data []byte) error { + *p = Policy{} + transports := policyTransportsMap{} + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { + switch key { + case "default": + return &p.Default + case "transports": + return &transports + default: + return nil + } + }); err != nil { + return err + } + + if p.Default == nil { + return InvalidPolicyFormatError("Default policy is missing") + } + p.Transports = map[string]PolicyTransportScopes(transports) + return nil +} + +// policyTransportsMap is a specialization of this map type for the strict JSON parsing semantics appropriate for the Policy.Transports member. +type policyTransportsMap map[string]PolicyTransportScopes + +// Compile-time check that policyTransportsMap implements json.Unmarshaler. +var _ json.Unmarshaler = (*policyTransportsMap)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (m *policyTransportsMap) UnmarshalJSON(data []byte) error { + // We can't unmarshal directly into map values because it is not possible to take an address of a map value. + // So, use a temporary map of pointers-to-slices and convert. + tmpMap := map[string]*PolicyTransportScopes{} + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { + // transport can be nil + transport := transports.Get(key) + // internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe. + if _, ok := tmpMap[key]; ok { + return nil + } + ptsWithTransport := policyTransportScopesWithTransport{ + transport: transport, + dest: &PolicyTransportScopes{}, // This allocates a new instance on each call. + } + tmpMap[key] = ptsWithTransport.dest + return &ptsWithTransport + }); err != nil { + return err + } + for key, ptr := range tmpMap { + (*m)[key] = *ptr + } + return nil +} + +// Compile-time check that PolicyTransportScopes "implements"" json.Unmarshaler. 
+// we want to only use policyTransportScopesWithTransport +var _ json.Unmarshaler = (*PolicyTransportScopes)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (m *PolicyTransportScopes) UnmarshalJSON(data []byte) error { + return errors.New("Do not try to unmarshal PolicyTransportScopes directly") +} + +// policyTransportScopesWithTransport is a way to unmarshal a PolicyTransportScopes +// while validating using a specific ImageTransport if not nil. +type policyTransportScopesWithTransport struct { + transport types.ImageTransport + dest *PolicyTransportScopes +} + +// Compile-time check that policyTransportScopesWithTransport implements json.Unmarshaler. +var _ json.Unmarshaler = (*policyTransportScopesWithTransport)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error { + // We can't unmarshal directly into map values because it is not possible to take an address of a map value. + // So, use a temporary map of pointers-to-slices and convert. + tmpMap := map[string]*PolicyRequirements{} + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { + // internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe. + if _, ok := tmpMap[key]; ok { + return nil + } + if key != "" && m.transport != nil { + if err := m.transport.ValidatePolicyConfigurationScope(key); err != nil { + return nil + } + } + ptr := &PolicyRequirements{} // This allocates a new instance on each call. + tmpMap[key] = ptr + return ptr + }); err != nil { + return err + } + for key, ptr := range tmpMap { + (*m.dest)[key] = *ptr + } + return nil +} + +// Compile-time check that PolicyRequirements implements json.Unmarshaler. +var _ json.Unmarshaler = (*PolicyRequirements)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (m *PolicyRequirements) UnmarshalJSON(data []byte) error { + reqJSONs := []json.RawMessage{} + if err := json.Unmarshal(data, &reqJSONs); err != nil { + return err + } + if len(reqJSONs) == 0 { + return InvalidPolicyFormatError("List of verification policy requirements must not be empty") + } + res := make([]PolicyRequirement, len(reqJSONs)) + for i, reqJSON := range reqJSONs { + req, err := newPolicyRequirementFromJSON(reqJSON) + if err != nil { + return err + } + res[i] = req + } + *m = res + return nil +} + +// newPolicyRequirementFromJSON parses JSON data into a PolicyRequirement implementation. +func newPolicyRequirementFromJSON(data []byte) (PolicyRequirement, error) { + var typeField prCommon + if err := json.Unmarshal(data, &typeField); err != nil { + return nil, err + } + var res PolicyRequirement + switch typeField.Type { + case prTypeInsecureAcceptAnything: + res = &prInsecureAcceptAnything{} + case prTypeReject: + res = &prReject{} + case prTypeSignedBy: + res = &prSignedBy{} + case prTypeSignedBaseLayer: + res = &prSignedBaseLayer{} + case prTypeSigstoreSigned: + res = &prSigstoreSigned{} + default: + return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type %q", typeField.Type)) + } + if err := json.Unmarshal(data, &res); err != nil { + return nil, err + } + return res, nil +} + +// newPRInsecureAcceptAnything is NewPRInsecureAcceptAnything, except it returns the private type. 
+func newPRInsecureAcceptAnything() *prInsecureAcceptAnything { + return &prInsecureAcceptAnything{prCommon{Type: prTypeInsecureAcceptAnything}} +} + +// NewPRInsecureAcceptAnything returns a new "insecureAcceptAnything" PolicyRequirement. +func NewPRInsecureAcceptAnything() PolicyRequirement { + return newPRInsecureAcceptAnything() +} + +// Compile-time check that prInsecureAcceptAnything implements json.Unmarshaler. +var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error { + *pr = prInsecureAcceptAnything{} + var tmp prInsecureAcceptAnything + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prTypeInsecureAcceptAnything { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) + } + *pr = *newPRInsecureAcceptAnything() + return nil +} + +// newPRReject is NewPRReject, except it returns the private type. +func newPRReject() *prReject { + return &prReject{prCommon{Type: prTypeReject}} +} + +// NewPRReject returns a new "reject" PolicyRequirement. +func NewPRReject() PolicyRequirement { + return newPRReject() +} + +// Compile-time check that prReject implements json.Unmarshaler. +var _ json.Unmarshaler = (*prReject)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (pr *prReject) UnmarshalJSON(data []byte) error { + *pr = prReject{} + var tmp prReject + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prTypeReject { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) + } + *pr = *newPRReject() + return nil +} + +// newPRSignedBy returns a new prSignedBy if parameters are valid. +func newPRSignedBy(keyType sbKeyType, keyPath string, keyPaths []string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { + if !keyType.IsValid() { + return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType %q", keyType)) + } + keySources := 0 + if keyPath != "" { + keySources++ + } + if keyPaths != nil { + keySources++ + } + if keyData != nil { + keySources++ + } + if keySources != 1 { + return nil, InvalidPolicyFormatError("exactly one of keyPath, keyPaths and keyData must be specified") + } + if signedIdentity == nil { + return nil, InvalidPolicyFormatError("signedIdentity not specified") + } + return &prSignedBy{ + prCommon: prCommon{Type: prTypeSignedBy}, + KeyType: keyType, + KeyPath: keyPath, + KeyPaths: keyPaths, + KeyData: keyData, + SignedIdentity: signedIdentity, + }, nil +} + +// newPRSignedByKeyPath is NewPRSignedByKeyPath, except it returns the private type. +func newPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { + return newPRSignedBy(keyType, keyPath, nil, nil, signedIdentity) +} + +// NewPRSignedByKeyPath returns a new "signedBy" PolicyRequirement using a KeyPath +func NewPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) { + return newPRSignedByKeyPath(keyType, keyPath, signedIdentity) +} + +// newPRSignedByKeyPaths is NewPRSignedByKeyPaths, except it returns the private type. 
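A sketch of the programmatic equivalent of a "signedBy" policy entry, built with the constructor above; the key path is a placeholder, and NewPRMMatchRepoDigestOrExact (defined later in this file) mirrors the default signedIdentity:

package example // hypothetical

import "github.com/containers/image/v5/signature"

func buildSignedByRequirement() (signature.PolicyRequirement, error) {
	// Exactly one of keyPath, keyPaths, and keyData may be supplied.
	return signature.NewPRSignedByKeyPath(
		signature.SBKeyTypeGPGKeys,
		"/etc/pki/example-pubkey.gpg",
		signature.NewPRMMatchRepoDigestOrExact(),
	)
}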
+func newPRSignedByKeyPaths(keyType sbKeyType, keyPaths []string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { + return newPRSignedBy(keyType, "", keyPaths, nil, signedIdentity) +} + +// NewPRSignedByKeyPaths returns a new "signedBy" PolicyRequirement using KeyPaths +func NewPRSignedByKeyPaths(keyType sbKeyType, keyPaths []string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) { + return newPRSignedByKeyPaths(keyType, keyPaths, signedIdentity) +} + +// newPRSignedByKeyData is NewPRSignedByKeyData, except it returns the private type. +func newPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { + return newPRSignedBy(keyType, "", nil, keyData, signedIdentity) +} + +// NewPRSignedByKeyData returns a new "signedBy" PolicyRequirement using a KeyData +func NewPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) { + return newPRSignedByKeyData(keyType, keyData, signedIdentity) +} + +// Compile-time check that prSignedBy implements json.Unmarshaler. +var _ json.Unmarshaler = (*prSignedBy)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (pr *prSignedBy) UnmarshalJSON(data []byte) error { + *pr = prSignedBy{} + var tmp prSignedBy + var gotKeyPath, gotKeyPaths, gotKeyData = false, false, false + var signedIdentity json.RawMessage + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { + switch key { + case "type": + return &tmp.Type + case "keyType": + return &tmp.KeyType + case "keyPath": + gotKeyPath = true + return &tmp.KeyPath + case "keyPaths": + gotKeyPaths = true + return &tmp.KeyPaths + case "keyData": + gotKeyData = true + return &tmp.KeyData + case "signedIdentity": + return &signedIdentity + default: + return nil + } + }); err != nil { + return err + } + + if tmp.Type != prTypeSignedBy { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) + } + if signedIdentity == nil { + tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact() + } else { + si, err := newPolicyReferenceMatchFromJSON(signedIdentity) + if err != nil { + return err + } + tmp.SignedIdentity = si + } + + var res *prSignedBy + var err error + switch { + case gotKeyPath && !gotKeyPaths && !gotKeyData: + res, err = newPRSignedByKeyPath(tmp.KeyType, tmp.KeyPath, tmp.SignedIdentity) + case !gotKeyPath && gotKeyPaths && !gotKeyData: + res, err = newPRSignedByKeyPaths(tmp.KeyType, tmp.KeyPaths, tmp.SignedIdentity) + case !gotKeyPath && !gotKeyPaths && gotKeyData: + res, err = newPRSignedByKeyData(tmp.KeyType, tmp.KeyData, tmp.SignedIdentity) + case !gotKeyPath && !gotKeyPaths && !gotKeyData: + return InvalidPolicyFormatError("Exactly one of keyPath, keyPaths and keyData must be specified, none of them present") + default: + return fmt.Errorf("Exactly one of keyPath, keyPaths and keyData must be specified, more than one present") + } + if err != nil { + return err + } + *pr = *res + + return nil +} + +// IsValid returns true iff kt is a recognized value +func (kt sbKeyType) IsValid() bool { + switch kt { + case SBKeyTypeGPGKeys, SBKeyTypeSignedByGPGKeys, + SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs: + return true + default: + return false + } +} + +// Compile-time check that sbKeyType implements json.Unmarshaler. +var _ json.Unmarshaler = (*sbKeyType)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (kt *sbKeyType) UnmarshalJSON(data []byte) error { + *kt = sbKeyType("") + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if !sbKeyType(s).IsValid() { + return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value %q", s)) + } + *kt = sbKeyType(s) + return nil +} + +// newPRSignedBaseLayer is NewPRSignedBaseLayer, except it returns the private type. +func newPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (*prSignedBaseLayer, error) { + if baseLayerIdentity == nil { + return nil, InvalidPolicyFormatError("baseLayerIdentity not specified") + } + return &prSignedBaseLayer{ + prCommon: prCommon{Type: prTypeSignedBaseLayer}, + BaseLayerIdentity: baseLayerIdentity, + }, nil +} + +// NewPRSignedBaseLayer returns a new "signedBaseLayer" PolicyRequirement. +func NewPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (PolicyRequirement, error) { + return newPRSignedBaseLayer(baseLayerIdentity) +} + +// Compile-time check that prSignedBaseLayer implements json.Unmarshaler. +var _ json.Unmarshaler = (*prSignedBaseLayer)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error { + *pr = prSignedBaseLayer{} + var tmp prSignedBaseLayer + var baseLayerIdentity json.RawMessage + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + "baseLayerIdentity": &baseLayerIdentity, + }); err != nil { + return err + } + + if tmp.Type != prTypeSignedBaseLayer { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) + } + bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity) + if err != nil { + return err + } + res, err := newPRSignedBaseLayer(bli) + if err != nil { + // Coverage: This should never happen, newPolicyReferenceMatchFromJSON has ensured bli is valid. + return err + } + *pr = *res + return nil +} + +// newPolicyReferenceMatchFromJSON parses JSON data into a PolicyReferenceMatch implementation. +func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error) { + var typeField prmCommon + if err := json.Unmarshal(data, &typeField); err != nil { + return nil, err + } + var res PolicyReferenceMatch + switch typeField.Type { + case prmTypeMatchExact: + res = &prmMatchExact{} + case prmTypeMatchRepoDigestOrExact: + res = &prmMatchRepoDigestOrExact{} + case prmTypeMatchRepository: + res = &prmMatchRepository{} + case prmTypeExactReference: + res = &prmExactReference{} + case prmTypeExactRepository: + res = &prmExactRepository{} + case prmTypeRemapIdentity: + res = &prmRemapIdentity{} + default: + return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type %q", typeField.Type)) + } + if err := json.Unmarshal(data, &res); err != nil { + return nil, err + } + return res, nil +} + +// newPRMMatchExact is NewPRMMatchExact, except it returns the private type. +func newPRMMatchExact() *prmMatchExact { + return &prmMatchExact{prmCommon{Type: prmTypeMatchExact}} +} + +// NewPRMMatchExact returns a new "matchExact" PolicyReferenceMatch. +func NewPRMMatchExact() PolicyReferenceMatch { + return newPRMMatchExact() +} + +// Compile-time check that prmMatchExact implements json.Unmarshaler. +var _ json.Unmarshaler = (*prmMatchExact)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. 
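For completeness, the "signedBaseLayer" requirement parsed above can also be constructed programmatically; a minimal sketch, with matchRepository (defined further below) chosen arbitrarily as the base-layer identity:

package example // hypothetical

import "github.com/containers/image/v5/signature"

func buildBaseLayerRequirement() (signature.PolicyRequirement, error) {
	// The base-layer identity must be a non-nil PolicyReferenceMatch.
	return signature.NewPRSignedBaseLayer(signature.NewPRMMatchRepository())
}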
+func (prm *prmMatchExact) UnmarshalJSON(data []byte) error { + *prm = prmMatchExact{} + var tmp prmMatchExact + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prmTypeMatchExact { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) + } + *prm = *newPRMMatchExact() + return nil +} + +// newPRMMatchRepoDigestOrExact is NewPRMMatchRepoDigestOrExact, except it returns the private type. +func newPRMMatchRepoDigestOrExact() *prmMatchRepoDigestOrExact { + return &prmMatchRepoDigestOrExact{prmCommon{Type: prmTypeMatchRepoDigestOrExact}} +} + +// NewPRMMatchRepoDigestOrExact returns a new "matchRepoDigestOrExact" PolicyReferenceMatch. +func NewPRMMatchRepoDigestOrExact() PolicyReferenceMatch { + return newPRMMatchRepoDigestOrExact() +} + +// Compile-time check that prmMatchRepoDigestOrExact implements json.Unmarshaler. +var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error { + *prm = prmMatchRepoDigestOrExact{} + var tmp prmMatchRepoDigestOrExact + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prmTypeMatchRepoDigestOrExact { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) + } + *prm = *newPRMMatchRepoDigestOrExact() + return nil +} + +// newPRMMatchRepository is NewPRMMatchRepository, except it returns the private type. +func newPRMMatchRepository() *prmMatchRepository { + return &prmMatchRepository{prmCommon{Type: prmTypeMatchRepository}} +} + +// NewPRMMatchRepository returns a new "matchRepository" PolicyReferenceMatch. +func NewPRMMatchRepository() PolicyReferenceMatch { + return newPRMMatchRepository() +} + +// Compile-time check that prmMatchRepository implements json.Unmarshaler. +var _ json.Unmarshaler = (*prmMatchRepository)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error { + *prm = prmMatchRepository{} + var tmp prmMatchRepository + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prmTypeMatchRepository { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) + } + *prm = *newPRMMatchRepository() + return nil +} + +// newPRMExactReference is NewPRMExactReference, except it returns the private type. +func newPRMExactReference(dockerReference string) (*prmExactReference, error) { + ref, err := reference.ParseNormalizedNamed(dockerReference) + if err != nil { + return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %q: %s", dockerReference, err.Error())) + } + if reference.IsNameOnly(ref) { + return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %q contains neither a tag nor digest", dockerReference)) + } + return &prmExactReference{ + prmCommon: prmCommon{Type: prmTypeExactReference}, + DockerReference: dockerReference, + }, nil +} + +// NewPRMExactReference returns a new "exactReference" PolicyReferenceMatch. 
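A usage sketch for the "exactReference" constructor that follows; as its validation enforces, a name-only reference without a tag or digest would be rejected. The reference is hypothetical:

package example // hypothetical

import "github.com/containers/image/v5/signature"

func buildExactReferenceMatch() (signature.PolicyReferenceMatch, error) {
	// "registry.example.com/ns/app" alone would fail; a tag or digest is required.
	return signature.NewPRMExactReference("registry.example.com/ns/app:latest")
}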
+func NewPRMExactReference(dockerReference string) (PolicyReferenceMatch, error) { + return newPRMExactReference(dockerReference) +} + +// Compile-time check that prmExactReference implements json.Unmarshaler. +var _ json.Unmarshaler = (*prmExactReference)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (prm *prmExactReference) UnmarshalJSON(data []byte) error { + *prm = prmExactReference{} + var tmp prmExactReference + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + "dockerReference": &tmp.DockerReference, + }); err != nil { + return err + } + + if tmp.Type != prmTypeExactReference { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) + } + + res, err := newPRMExactReference(tmp.DockerReference) + if err != nil { + return err + } + *prm = *res + return nil +} + +// newPRMExactRepository is NewPRMExactRepository, except it returns the private type. +func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) { + if _, err := reference.ParseNormalizedNamed(dockerRepository); err != nil { + return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %q: %s", dockerRepository, err.Error())) + } + return &prmExactRepository{ + prmCommon: prmCommon{Type: prmTypeExactRepository}, + DockerRepository: dockerRepository, + }, nil +} + +// NewPRMExactRepository returns a new "exactRepository" PolicyRepositoryMatch. +func NewPRMExactRepository(dockerRepository string) (PolicyReferenceMatch, error) { + return newPRMExactRepository(dockerRepository) +} + +// Compile-time check that prmExactRepository implements json.Unmarshaler. +var _ json.Unmarshaler = (*prmExactRepository)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (prm *prmExactRepository) UnmarshalJSON(data []byte) error { + *prm = prmExactRepository{} + var tmp prmExactRepository + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + "dockerRepository": &tmp.DockerRepository, + }); err != nil { + return err + } + + if tmp.Type != prmTypeExactRepository { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) + } + + res, err := newPRMExactRepository(tmp.DockerRepository) + if err != nil { + return err + } + *prm = *res + return nil +} + +// Private objects for validateIdentityRemappingPrefix +var ( + // remapIdentityDomainRegexp matches exactly a reference domain (name[:port]) + remapIdentityDomainRegexp = regexp.Delayed("^" + reference.DomainRegexp.String() + "$") + // remapIdentityDomainPrefixRegexp matches a reference that starts with a domain; + // we need this because reference.NameRegexp accepts short names with docker.io implied. + remapIdentityDomainPrefixRegexp = regexp.Delayed("^" + reference.DomainRegexp.String() + "/") + // remapIdentityNameRegexp matches exactly a reference.Named name (possibly unnormalized) + remapIdentityNameRegexp = regexp.Delayed("^" + reference.NameRegexp.String() + "$") +) + +// validateIdentityRemappingPrefix returns an InvalidPolicyFormatError if s is detected to be invalid +// for the Prefix or SignedPrefix values of prmRemapIdentity. +// Note that it may not recognize _all_ invalid values. +func validateIdentityRemappingPrefix(s string) error { + if remapIdentityDomainRegexp.MatchString(s) || + (remapIdentityNameRegexp.MatchString(s) && remapIdentityDomainPrefixRegexp.MatchString(s)) { + // FIXME? 
This does not reject "shortname" nor "ns/shortname", because docker/reference + // does not provide an API for the short vs. long name logic. + // It will either not match, or fail in the ParseNamed call of + // prmRemapIdentity.remapReferencePrefix when trying to use such a prefix. + return nil + } + return InvalidPolicyFormatError(fmt.Sprintf("prefix %q is not valid", s)) +} + +// newPRMRemapIdentity is NewPRMRemapIdentity, except it returns the private type. +func newPRMRemapIdentity(prefix, signedPrefix string) (*prmRemapIdentity, error) { + if err := validateIdentityRemappingPrefix(prefix); err != nil { + return nil, err + } + if err := validateIdentityRemappingPrefix(signedPrefix); err != nil { + return nil, err + } + return &prmRemapIdentity{ + prmCommon: prmCommon{Type: prmTypeRemapIdentity}, + Prefix: prefix, + SignedPrefix: signedPrefix, + }, nil +} + +// NewPRMRemapIdentity returns a new "remapIdentity" PolicyRepositoryMatch. +func NewPRMRemapIdentity(prefix, signedPrefix string) (PolicyReferenceMatch, error) { + return newPRMRemapIdentity(prefix, signedPrefix) +} + +// Compile-time check that prmRemapIdentity implements json.Unmarshaler. +var _ json.Unmarshaler = (*prmRemapIdentity)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (prm *prmRemapIdentity) UnmarshalJSON(data []byte) error { + *prm = prmRemapIdentity{} + var tmp prmRemapIdentity + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + "prefix": &tmp.Prefix, + "signedPrefix": &tmp.SignedPrefix, + }); err != nil { + return err + } + + if tmp.Type != prmTypeRemapIdentity { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) + } + + res, err := newPRMRemapIdentity(tmp.Prefix, tmp.SignedPrefix) + if err != nil { + return err + } + *prm = *res + return nil +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go b/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go new file mode 100644 index 0000000000..6393b66ea6 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go @@ -0,0 +1,630 @@ +package signature + +import ( + "encoding/json" + "fmt" + + "github.com/containers/image/v5/signature/internal" +) + +// PRSigstoreSignedOption is way to pass values to NewPRSigstoreSigned +type PRSigstoreSignedOption func(*prSigstoreSigned) error + +// PRSigstoreSignedWithKeyPath specifies a value for the "keyPath" field when calling NewPRSigstoreSigned. +func PRSigstoreSignedWithKeyPath(keyPath string) PRSigstoreSignedOption { + return func(pr *prSigstoreSigned) error { + if pr.KeyPath != "" { + return InvalidPolicyFormatError(`"keyPath" already specified`) + } + pr.KeyPath = keyPath + return nil + } +} + +// PRSigstoreSignedWithKeyPaths specifies a value for the "keyPaths" field when calling NewPRSigstoreSigned. +func PRSigstoreSignedWithKeyPaths(keyPaths []string) PRSigstoreSignedOption { + return func(pr *prSigstoreSigned) error { + if pr.KeyPaths != nil { + return InvalidPolicyFormatError(`"keyPaths" already specified`) + } + if len(keyPaths) == 0 { + return InvalidPolicyFormatError(`"keyPaths" contains no entries`) + } + pr.KeyPaths = keyPaths + return nil + } +} + +// PRSigstoreSignedWithKeyData specifies a value for the "keyData" field when calling NewPRSigstoreSigned. 
+// PRSigstoreSignedWithKeyData specifies a value for the "keyData" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithKeyData(keyData []byte) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.KeyData != nil {
+			return InvalidPolicyFormatError(`"keyData" already specified`)
+		}
+		pr.KeyData = keyData
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithKeyDatas specifies a value for the "keyDatas" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithKeyDatas(keyDatas [][]byte) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.KeyDatas != nil {
+			return InvalidPolicyFormatError(`"keyDatas" already specified`)
+		}
+		if len(keyDatas) == 0 {
+			return InvalidPolicyFormatError(`"keyDatas" contains no entries`)
+		}
+		pr.KeyDatas = keyDatas
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithFulcio specifies a value for the "fulcio" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithFulcio(fulcio PRSigstoreSignedFulcio) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.Fulcio != nil {
+			return InvalidPolicyFormatError(`"fulcio" already specified`)
+		}
+		pr.Fulcio = fulcio
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithPKI specifies a value for the "pki" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithPKI(p PRSigstoreSignedPKI) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.PKI != nil {
+			return InvalidPolicyFormatError(`"pki" already specified`)
+		}
+		pr.PKI = p
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithRekorPublicKeyPath specifies a value for the "rekorPublicKeyPath" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithRekorPublicKeyPath(rekorPublicKeyPath string) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.RekorPublicKeyPath != "" {
+			return InvalidPolicyFormatError(`"rekorPublicKeyPath" already specified`)
+		}
+		pr.RekorPublicKeyPath = rekorPublicKeyPath
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithRekorPublicKeyPaths specifies a value for the "rekorPublicKeyPaths" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithRekorPublicKeyPaths(rekorPublicKeyPaths []string) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.RekorPublicKeyPaths != nil {
+			return InvalidPolicyFormatError(`"rekorPublicKeyPaths" already specified`)
+		}
+		if len(rekorPublicKeyPaths) == 0 {
+			return InvalidPolicyFormatError(`"rekorPublicKeyPaths" contains no entries`)
+		}
+		pr.RekorPublicKeyPaths = rekorPublicKeyPaths
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithRekorPublicKeyData specifies a value for the "rekorPublicKeyData" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithRekorPublicKeyData(rekorPublicKeyData []byte) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.RekorPublicKeyData != nil {
+			return InvalidPolicyFormatError(`"rekorPublicKeyData" already specified`)
+		}
+		pr.RekorPublicKeyData = rekorPublicKeyData
+		return nil
+	}
+}
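// Note that the rekorPublicKey* options do not replace the key or Fulcio trust
// root; they additionally require signatures to carry a verifiable entry from a
// Rekor transparency log. A sketch with assumed, hypothetical paths:
//
//	req, err := NewPRSigstoreSigned(
//		PRSigstoreSignedWithKeyPath("/etc/containers/cosign.pub"),
//		PRSigstoreSignedWithRekorPublicKeyPath("/etc/containers/rekor.pub"),
//		PRSigstoreSignedWithSignedIdentity(NewPRMMatchRepoDigestOrExact()),
//	)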
+// PRSigstoreSignedWithRekorPublicKeyDatas specifies a value for the "rekorPublicKeyDatas" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithRekorPublicKeyDatas(rekorPublicKeyDatas [][]byte) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.RekorPublicKeyDatas != nil {
+			return InvalidPolicyFormatError(`"rekorPublicKeyDatas" already specified`)
+		}
+		if len(rekorPublicKeyDatas) == 0 {
+			return InvalidPolicyFormatError(`"rekorPublicKeyDatas" contains no entries`)
+		}
+		pr.RekorPublicKeyDatas = rekorPublicKeyDatas
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithSignedIdentity specifies a value for the "signedIdentity" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithSignedIdentity(signedIdentity PolicyReferenceMatch) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.SignedIdentity != nil {
+			return InvalidPolicyFormatError(`"signedIdentity" already specified`)
+		}
+		pr.SignedIdentity = signedIdentity
+		return nil
+	}
+}
+
+// newPRSigstoreSigned is NewPRSigstoreSigned, except it returns the private type.
+func newPRSigstoreSigned(options ...PRSigstoreSignedOption) (*prSigstoreSigned, error) {
+	res := prSigstoreSigned{
+		prCommon: prCommon{Type: prTypeSigstoreSigned},
+	}
+	for _, o := range options {
+		if err := o(&res); err != nil {
+			return nil, err
+		}
+	}
+
+	keySources := 0
+	if res.KeyPath != "" {
+		keySources++
+	}
+	if res.KeyPaths != nil {
+		keySources++
+	}
+	if res.KeyData != nil {
+		keySources++
+	}
+	if res.KeyDatas != nil {
+		keySources++
+	}
+	if res.Fulcio != nil {
+		keySources++
+	}
+	if res.PKI != nil {
+		keySources++
+	}
+	if keySources != 1 {
+		return nil, InvalidPolicyFormatError("exactly one of keyPath, keyPaths, keyData, keyDatas, fulcio, and pki must be specified")
+	}
+
+	rekorSources := 0
+	if res.RekorPublicKeyPath != "" {
+		rekorSources++
+	}
+	if res.RekorPublicKeyPaths != nil {
+		rekorSources++
+	}
+	if res.RekorPublicKeyData != nil {
+		rekorSources++
+	}
+	if res.RekorPublicKeyDatas != nil {
+		rekorSources++
+	}
+	if rekorSources > 1 {
+		return nil, InvalidPolicyFormatError("at most one of rekorPublicKeyPath, rekorPublicKeyPaths, rekorPublicKeyData and rekorPublicKeyDatas can be used simultaneously")
+	}
+	if res.Fulcio != nil && rekorSources == 0 {
+		return nil, InvalidPolicyFormatError("at least one of rekorPublicKeyPath, rekorPublicKeyPaths, rekorPublicKeyData and rekorPublicKeyDatas must be specified if fulcio is used")
+	}
+	if res.PKI != nil && rekorSources > 0 {
+		return nil, InvalidPolicyFormatError("rekorPublicKeyPath, rekorPublicKeyPaths, rekorPublicKeyData and rekorPublicKeyDatas are not supported for pki")
+	}
+
+	if res.SignedIdentity == nil {
+		return nil, InvalidPolicyFormatError("signedIdentity not specified")
+	}
+
+	return &res, nil
+}
+
+// NewPRSigstoreSigned returns a new "sigstoreSigned" PolicyRequirement based on options.
+func NewPRSigstoreSigned(options ...PRSigstoreSignedOption) (PolicyRequirement, error) {
+	return newPRSigstoreSigned(options...)
+} + +// NewPRSigstoreSignedKeyPath returns a new "sigstoreSigned" PolicyRequirement using a KeyPath +func NewPRSigstoreSignedKeyPath(keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) { + return NewPRSigstoreSigned( + PRSigstoreSignedWithKeyPath(keyPath), + PRSigstoreSignedWithSignedIdentity(signedIdentity), + ) +} + +// NewPRSigstoreSignedKeyData returns a new "sigstoreSigned" PolicyRequirement using a KeyData +func NewPRSigstoreSignedKeyData(keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) { + return NewPRSigstoreSigned( + PRSigstoreSignedWithKeyData(keyData), + PRSigstoreSignedWithSignedIdentity(signedIdentity), + ) +} + +// Compile-time check that prSigstoreSigned implements json.Unmarshaler. +var _ json.Unmarshaler = (*prSigstoreSigned)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error { + *pr = prSigstoreSigned{} + var tmp prSigstoreSigned + var gotKeyPath, gotKeyPaths, gotKeyData, gotKeyDatas, gotFulcio, gotPKI bool + var gotRekorPublicKeyPath, gotRekorPublicKeyPaths, gotRekorPublicKeyData, gotRekorPublicKeyDatas bool + var fulcio prSigstoreSignedFulcio + var pki prSigstoreSignedPKI + var signedIdentity json.RawMessage + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { + switch key { + case "type": + return &tmp.Type + case "keyPath": + gotKeyPath = true + return &tmp.KeyPath + case "keyPaths": + gotKeyPaths = true + return &tmp.KeyPaths + case "keyData": + gotKeyData = true + return &tmp.KeyData + case "keyDatas": + gotKeyDatas = true + return &tmp.KeyDatas + case "fulcio": + gotFulcio = true + return &fulcio + case "rekorPublicKeyPath": + gotRekorPublicKeyPath = true + return &tmp.RekorPublicKeyPath + case "rekorPublicKeyPaths": + gotRekorPublicKeyPaths = true + return &tmp.RekorPublicKeyPaths + case "rekorPublicKeyData": + gotRekorPublicKeyData = true + return &tmp.RekorPublicKeyData + case "rekorPublicKeyDatas": + gotRekorPublicKeyDatas = true + return &tmp.RekorPublicKeyDatas + case "pki": + gotPKI = true + return &pki + case "signedIdentity": + return &signedIdentity + default: + return nil + } + }); err != nil { + return err + } + + if tmp.Type != prTypeSigstoreSigned { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) + } + if signedIdentity == nil { + tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact() + } else { + si, err := newPolicyReferenceMatchFromJSON(signedIdentity) + if err != nil { + return err + } + tmp.SignedIdentity = si + } + + var opts []PRSigstoreSignedOption + if gotKeyPath { + opts = append(opts, PRSigstoreSignedWithKeyPath(tmp.KeyPath)) + } + if gotKeyPaths { + opts = append(opts, PRSigstoreSignedWithKeyPaths(tmp.KeyPaths)) + } + if gotKeyData { + opts = append(opts, PRSigstoreSignedWithKeyData(tmp.KeyData)) + } + if gotKeyDatas { + opts = append(opts, PRSigstoreSignedWithKeyDatas(tmp.KeyDatas)) + } + if gotFulcio { + opts = append(opts, PRSigstoreSignedWithFulcio(&fulcio)) + } + if gotRekorPublicKeyPath { + opts = append(opts, PRSigstoreSignedWithRekorPublicKeyPath(tmp.RekorPublicKeyPath)) + } + if gotRekorPublicKeyPaths { + opts = append(opts, PRSigstoreSignedWithRekorPublicKeyPaths(tmp.RekorPublicKeyPaths)) + } + if gotRekorPublicKeyData { + opts = append(opts, PRSigstoreSignedWithRekorPublicKeyData(tmp.RekorPublicKeyData)) + } + if gotRekorPublicKeyDatas { + opts = append(opts, 
PRSigstoreSignedWithRekorPublicKeyDatas(tmp.RekorPublicKeyDatas)) + } + if gotPKI { + opts = append(opts, PRSigstoreSignedWithPKI(&pki)) + } + opts = append(opts, PRSigstoreSignedWithSignedIdentity(tmp.SignedIdentity)) + + res, err := newPRSigstoreSigned(opts...) + if err != nil { + return err + } + *pr = *res + return nil +} + +// PRSigstoreSignedFulcioOption is a way to pass values to NewPRSigstoreSignedFulcio +type PRSigstoreSignedFulcioOption func(*prSigstoreSignedFulcio) error + +// PRSigstoreSignedFulcioWithCAPath specifies a value for the "caPath" field when calling NewPRSigstoreSignedFulcio +func PRSigstoreSignedFulcioWithCAPath(caPath string) PRSigstoreSignedFulcioOption { + return func(f *prSigstoreSignedFulcio) error { + if f.CAPath != "" { + return InvalidPolicyFormatError(`"caPath" already specified`) + } + f.CAPath = caPath + return nil + } +} + +// PRSigstoreSignedFulcioWithCAData specifies a value for the "caData" field when calling NewPRSigstoreSignedFulcio +func PRSigstoreSignedFulcioWithCAData(caData []byte) PRSigstoreSignedFulcioOption { + return func(f *prSigstoreSignedFulcio) error { + if f.CAData != nil { + return InvalidPolicyFormatError(`"caData" already specified`) + } + f.CAData = caData + return nil + } +} + +// PRSigstoreSignedFulcioWithOIDCIssuer specifies a value for the "oidcIssuer" field when calling NewPRSigstoreSignedFulcio +func PRSigstoreSignedFulcioWithOIDCIssuer(oidcIssuer string) PRSigstoreSignedFulcioOption { + return func(f *prSigstoreSignedFulcio) error { + if f.OIDCIssuer != "" { + return InvalidPolicyFormatError(`"oidcIssuer" already specified`) + } + f.OIDCIssuer = oidcIssuer + return nil + } +} + +// PRSigstoreSignedFulcioWithSubjectEmail specifies a value for the "subjectEmail" field when calling NewPRSigstoreSignedFulcio +func PRSigstoreSignedFulcioWithSubjectEmail(subjectEmail string) PRSigstoreSignedFulcioOption { + return func(f *prSigstoreSignedFulcio) error { + if f.SubjectEmail != "" { + return InvalidPolicyFormatError(`"subjectEmail" already specified`) + } + f.SubjectEmail = subjectEmail + return nil + } +} + +// newPRSigstoreSignedFulcio is NewPRSigstoreSignedFulcio, except it returns the private type +func newPRSigstoreSignedFulcio(options ...PRSigstoreSignedFulcioOption) (*prSigstoreSignedFulcio, error) { + res := prSigstoreSignedFulcio{} + for _, o := range options { + if err := o(&res); err != nil { + return nil, err + } + } + + if res.CAPath != "" && res.CAData != nil { + return nil, InvalidPolicyFormatError("caPath and caData cannot be used simultaneously") + } + if res.CAPath == "" && res.CAData == nil { + return nil, InvalidPolicyFormatError("At least one of caPath and caData must be specified") + } + if res.OIDCIssuer == "" { + return nil, InvalidPolicyFormatError("oidcIssuer not specified") + } + if res.SubjectEmail == "" { + return nil, InvalidPolicyFormatError("subjectEmail not specified") + } + + return &res, nil +} + +// NewPRSigstoreSignedFulcio returns a PRSigstoreSignedFulcio based on options. +func NewPRSigstoreSignedFulcio(options ...PRSigstoreSignedFulcioOption) (PRSigstoreSignedFulcio, error) { + return newPRSigstoreSignedFulcio(options...) +} + +// Compile-time check that prSigstoreSignedFulcio implements json.Unmarshaler. +var _ json.Unmarshaler = (*prSigstoreSignedFulcio)(nil) + +func (f *prSigstoreSignedFulcio) UnmarshalJSON(data []byte) error { + *f = prSigstoreSignedFulcio{} + var tmp prSigstoreSignedFulcio + var gotCAPath, gotCAData, gotOIDCIssuer, gotSubjectEmail bool // = false... 
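	// The got* flags record which JSON keys were actually present: the values are
	// re-validated through the PRSigstoreSignedFulcioWith* options below, and a
	// zero-valued tmp field alone could not distinguish an absent key from an
	// explicitly empty one.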
+ if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { + switch key { + case "caPath": + gotCAPath = true + return &tmp.CAPath + case "caData": + gotCAData = true + return &tmp.CAData + case "oidcIssuer": + gotOIDCIssuer = true + return &tmp.OIDCIssuer + case "subjectEmail": + gotSubjectEmail = true + return &tmp.SubjectEmail + default: + return nil + } + }); err != nil { + return err + } + + var opts []PRSigstoreSignedFulcioOption + if gotCAPath { + opts = append(opts, PRSigstoreSignedFulcioWithCAPath(tmp.CAPath)) + } + if gotCAData { + opts = append(opts, PRSigstoreSignedFulcioWithCAData(tmp.CAData)) + } + if gotOIDCIssuer { + opts = append(opts, PRSigstoreSignedFulcioWithOIDCIssuer(tmp.OIDCIssuer)) + } + if gotSubjectEmail { + opts = append(opts, PRSigstoreSignedFulcioWithSubjectEmail(tmp.SubjectEmail)) + } + + res, err := newPRSigstoreSignedFulcio(opts...) + if err != nil { + return err + } + + *f = *res + return nil +} + +// PRSigstoreSignedPKIOption is a way to pass values to NewPRSigstoreSignedPKI +type PRSigstoreSignedPKIOption func(*prSigstoreSignedPKI) error + +// PRSigstoreSignedPKIWithCARootsPath specifies a value for the "caRootsPath" field when calling NewPRSigstoreSignedPKI +func PRSigstoreSignedPKIWithCARootsPath(caRootsPath string) PRSigstoreSignedPKIOption { + return func(p *prSigstoreSignedPKI) error { + if p.CARootsPath != "" { + return InvalidPolicyFormatError(`"caRootsPath" already specified`) + } + p.CARootsPath = caRootsPath + return nil + } +} + +// PRSigstoreSignedPKIWithCARootsData specifies a value for the "caRootsData" field when calling NewPRSigstoreSignedPKI +func PRSigstoreSignedPKIWithCARootsData(caRootsData []byte) PRSigstoreSignedPKIOption { + return func(p *prSigstoreSignedPKI) error { + if p.CARootsData != nil { + return InvalidPolicyFormatError(`"caRootsData" already specified`) + } + p.CARootsData = caRootsData + return nil + } +} + +// PRSigstoreSignedPKIWithCAIntermediatesPath specifies a value for the "caIntermediatesPath" field when calling NewPRSigstoreSignedPKI +func PRSigstoreSignedPKIWithCAIntermediatesPath(caIntermediatesPath string) PRSigstoreSignedPKIOption { + return func(p *prSigstoreSignedPKI) error { + if p.CAIntermediatesPath != "" { + return InvalidPolicyFormatError(`"caIntermediatesPath" already specified`) + } + p.CAIntermediatesPath = caIntermediatesPath + return nil + } +} + +// PRSigstoreSignedPKIWithCAIntermediatesData specifies a value for the "caIntermediatesData" field when calling NewPRSigstoreSignedPKI +func PRSigstoreSignedPKIWithCAIntermediatesData(caIntermediatesData []byte) PRSigstoreSignedPKIOption { + return func(p *prSigstoreSignedPKI) error { + if p.CAIntermediatesData != nil { + return InvalidPolicyFormatError(`"caIntermediatesData" already specified`) + } + p.CAIntermediatesData = caIntermediatesData + return nil + } +} + +// PRSigstoreSignedPKIWithSubjectEmail specifies a value for the "subjectEmail" field when calling NewPRSigstoreSignedPKI +func PRSigstoreSignedPKIWithSubjectEmail(subjectEmail string) PRSigstoreSignedPKIOption { + return func(p *prSigstoreSignedPKI) error { + if p.SubjectEmail != "" { + return InvalidPolicyFormatError(`"subjectEmail" already specified`) + } + p.SubjectEmail = subjectEmail + return nil + } +} + +// PRSigstoreSignedPKIWithSubjectHostname specifies a value for the "subjectHostname" field when calling NewPRSigstoreSignedPKI +func PRSigstoreSignedPKIWithSubjectHostname(subjectHostname string) PRSigstoreSignedPKIOption { + return func(p *prSigstoreSignedPKI) 
error { + if p.SubjectHostname != "" { + return InvalidPolicyFormatError(`"subjectHostname" already specified`) + } + p.SubjectHostname = subjectHostname + return nil + } +} + +// newPRSigstoreSignedPKI is NewPRSigstoreSignedPKI, except it returns the private type +func newPRSigstoreSignedPKI(options ...PRSigstoreSignedPKIOption) (*prSigstoreSignedPKI, error) { + res := prSigstoreSignedPKI{} + for _, o := range options { + if err := o(&res); err != nil { + return nil, err + } + } + + if res.CARootsPath != "" && res.CARootsData != nil { + return nil, InvalidPolicyFormatError("caRootsPath and caRootsData cannot be used simultaneously") + } + if res.CARootsPath == "" && res.CARootsData == nil { + return nil, InvalidPolicyFormatError("At least one of caRootsPath and caRootsData must be specified") + } + + if res.CAIntermediatesPath != "" && res.CAIntermediatesData != nil { + return nil, InvalidPolicyFormatError("caIntermediatesPath and caIntermediatesData cannot be used simultaneously") + } + + if res.SubjectEmail == "" && res.SubjectHostname == "" { + return nil, InvalidPolicyFormatError("At least one of subjectEmail, subjectHostname must be specified") + } + + return &res, nil +} + +// NewPRSigstoreSignedPKI returns a PRSigstoreSignedPKI based on options. +func NewPRSigstoreSignedPKI(options ...PRSigstoreSignedPKIOption) (PRSigstoreSignedPKI, error) { + return newPRSigstoreSignedPKI(options...) +} + +// Compile-time check that prSigstoreSignedPKI implements json.Unmarshaler. +var _ json.Unmarshaler = (*prSigstoreSignedPKI)(nil) + +func (p *prSigstoreSignedPKI) UnmarshalJSON(data []byte) error { + *p = prSigstoreSignedPKI{} + var tmp prSigstoreSignedPKI + var gotCARootsPath, gotCARootsData, gotCAIntermediatesPath, gotCAIntermediatesData, gotSubjectEmail, gotSubjectHostname bool + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { + switch key { + case "caRootsPath": + gotCARootsPath = true + return &tmp.CARootsPath + case "caRootsData": + gotCARootsData = true + return &tmp.CARootsData + case "caIntermediatesPath": + gotCAIntermediatesPath = true + return &tmp.CAIntermediatesPath + case "caIntermediatesData": + gotCAIntermediatesData = true + return &tmp.CAIntermediatesData + case "subjectEmail": + gotSubjectEmail = true + return &tmp.SubjectEmail + case "subjectHostname": + gotSubjectHostname = true + return &tmp.SubjectHostname + default: + return nil + } + }); err != nil { + return err + } + + var opts []PRSigstoreSignedPKIOption + if gotCARootsPath { + opts = append(opts, PRSigstoreSignedPKIWithCARootsPath(tmp.CARootsPath)) + } + if gotCARootsData { + opts = append(opts, PRSigstoreSignedPKIWithCARootsData(tmp.CARootsData)) + } + if gotCAIntermediatesPath { + opts = append(opts, PRSigstoreSignedPKIWithCAIntermediatesPath(tmp.CAIntermediatesPath)) + } + if gotCAIntermediatesData { + opts = append(opts, PRSigstoreSignedPKIWithCAIntermediatesData(tmp.CAIntermediatesData)) + } + if gotSubjectEmail { + opts = append(opts, PRSigstoreSignedPKIWithSubjectEmail(tmp.SubjectEmail)) + } + if gotSubjectHostname { + opts = append(opts, PRSigstoreSignedPKIWithSubjectHostname(tmp.SubjectHostname)) + } + + res, err := newPRSigstoreSignedPKI(opts...) 
+	if err != nil {
+		return err
+	}
+
+	*p = *res
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval.go b/vendor/github.com/containers/image/v5/signature/policy_eval.go
new file mode 100644
index 0000000000..ab6b89c260
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval.go
@@ -0,0 +1,293 @@
+// This defines the top-level policy evaluation API.
+// To the extent possible, the interface of the functions provided
+// here is intended to be completely unambiguous, and stable for users
+// to rely on.
+
+package signature
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/containers/image/v5/internal/private"
+	"github.com/containers/image/v5/internal/unparsedimage"
+	"github.com/containers/image/v5/types"
+	"github.com/sirupsen/logrus"
+)
+
+// PolicyRequirementError is an explanatory text for rejecting a signature or an image.
+type PolicyRequirementError string
+
+func (err PolicyRequirementError) Error() string {
+	return string(err)
+}
+
+// signatureAcceptanceResult is the principal value returned by isSignatureAuthorAccepted.
+type signatureAcceptanceResult string
+
+const (
+	sarAccepted signatureAcceptanceResult = "sarAccepted"
+	sarRejected signatureAcceptanceResult = "sarRejected"
+	sarUnknown  signatureAcceptanceResult = "sarUnknown"
+)
+
+// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
+// The type is public, but its definition is private.
+type PolicyRequirement interface {
+	// FIXME: For speed, we should support creating per-context state (not stored in the PolicyRequirement), to cache
+	// costly initialization like creating temporary GPG home directories and reading files.
+	// Setup() (someState, error)
+	// Then, the operations below would be done on the someState object, not directly on a PolicyRequirement.
+
+	// isSignatureAuthorAccepted, given an image and a signature blob, returns:
+	// - sarAccepted if the signature has been verified against the appropriate public key
+	//   (where "appropriate public key" may depend on the contents of the signature);
+	//   in that case a parsed Signature should be returned.
+	// - sarRejected if the signature has not been verified;
+	//   in that case error must be non-nil, and should be a PolicyRequirementError if evaluation
+	//   succeeded but the result was rejection.
+	// - sarUnknown if this PolicyRequirement does not deal with signatures.
+	//   NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed.
+	//   Returning sarUnknown and a non-nil error value is invalid.
+	// WARNING: This makes the signature contents acceptable for further processing,
+	// but it does not necessarily mean that the contents of the signature are
+	// consistent with local policy.
+	// For example:
+	// - Do not use a true value to determine whether to run
+	//   a container based on this image; use IsRunningImageAllowed instead.
+	// - Just because a signature is accepted does not automatically mean the contents of the
+	//   signature are authorized to run code as root, or to affect system or cluster configuration.
+	isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error)
+
+	// isRunningImageAllowed returns true if the requirement allows running an image.
+	// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
+	// succeeded but the result was rejection.
+ // WARNING: This validates signatures and the manifest, but does not download or validate the + // layers. Users must validate that the layers match their expected digests. + isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) +} + +// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement. +// The type is public, but its implementation is private. +type PolicyReferenceMatch interface { + // matchesDockerReference decides whether a specific image identity is accepted for an image + // (or, usually, for the image's Reference().DockerReference()). Note that + // image.Reference().DockerReference() may be nil. + matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool +} + +// PolicyContext encapsulates a policy and possible cached state +// for speeding up its evaluation. +type PolicyContext struct { + Policy *Policy + state policyContextState // Internal consistency checking +} + +// policyContextState is used internally to verify the users are not misusing a PolicyContext. +type policyContextState string + +const ( + pcInitializing policyContextState = "Initializing" + pcReady policyContextState = "Ready" + pcInUse policyContextState = "InUse" + pcDestroying policyContextState = "Destroying" + pcDestroyed policyContextState = "Destroyed" +) + +// changeState changes pc.state, or fails if the state is unexpected +func (pc *PolicyContext) changeState(expected, new policyContextState) error { + if pc.state != expected { + return fmt.Errorf(`Invalid PolicyContext state, expected %q, found %q`, expected, pc.state) + } + pc.state = new + return nil +} + +// NewPolicyContext sets up and initializes a context for the specified policy. +// The policy must not be modified while the context exists. FIXME: make a deep copy? +// If this function succeeds, the caller should call PolicyContext.Destroy() when done. +func NewPolicyContext(policy *Policy) (*PolicyContext, error) { + pc := &PolicyContext{Policy: policy, state: pcInitializing} + // FIXME: initialize + if err := pc.changeState(pcInitializing, pcReady); err != nil { + // Huh?! This should never fail, we didn't give the pointer to anybody. + // Just give up and leave unclean state around. + return nil, err + } + return pc, nil +} + +// Destroy should be called when the user of the context is done with it. +func (pc *PolicyContext) Destroy() error { + if err := pc.changeState(pcReady, pcDestroying); err != nil { + return err + } + // FIXME: destroy + return pc.changeState(pcDestroying, pcDestroyed) +} + +// policyIdentityLogName returns a string description of the image identity for policy purposes. +// ONLY use this for log messages, not for any decisions! +func policyIdentityLogName(ref types.ImageReference) string { + return ref.Transport().Name() + ":" + ref.PolicyConfigurationIdentity() +} + +// requirementsForImageRef selects the appropriate requirements for ref. +func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) PolicyRequirements { + // Do we have a PolicyTransportScopes for this transport? + transportName := ref.Transport().Name() + if transportScopes, ok := pc.Policy.Transports[transportName]; ok { + // Look for a full match. + identity := ref.PolicyConfigurationIdentity() + if req, ok := transportScopes[identity]; ok { + logrus.Debugf(` Using transport %q policy section %q`, transportName, identity) + return req + } + + // Look for a match of the possible parent namespaces. 
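		// For example (hypothetical reference): for "registry.example.com/ns/app:latest",
		// the docker transport yields successively broader scopes such as
		// "registry.example.com/ns/app", "registry.example.com/ns" and
		// "registry.example.com", before the "" transport default below is consulted.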
+		for _, name := range ref.PolicyConfigurationNamespaces() {
+			if req, ok := transportScopes[name]; ok {
+				logrus.Debugf(` Using transport %q specific policy section %q`, transportName, name)
+				return req
+			}
+		}
+
+		// Look for a default match for the transport.
+		if req, ok := transportScopes[""]; ok {
+			logrus.Debugf(` Using transport %q policy section ""`, transportName)
+			return req
+		}
+	}
+
+	logrus.Debugf(" Using default policy section")
+	return pc.Policy.Default
+}
+
+// GetSignaturesWithAcceptedAuthor returns those signatures from an image
+// for which the policy accepts the author (and which have been successfully
+// verified).
+// NOTE: This may legitimately return an empty list and no error, if the image
+// has no signatures or only invalid signatures.
+// WARNING: This makes the signature contents acceptable for further processing,
+// but it does not necessarily mean that the contents of the signature are
+// consistent with local policy.
+// For example:
+// - Do not use the existence of an accepted signature to determine whether to run
+//   a container based on this image; use IsRunningImageAllowed instead.
+// - Just because a signature is accepted does not automatically mean the contents of the
+//   signature are authorized to run code as root, or to affect system or cluster configuration.
+func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, publicImage types.UnparsedImage) (sigs []*Signature, finalErr error) {
+	if err := pc.changeState(pcReady, pcInUse); err != nil {
+		return nil, err
+	}
+	defer func() {
+		if err := pc.changeState(pcInUse, pcReady); err != nil {
+			sigs = nil
+			finalErr = err
+		}
+	}()
+
+	image := unparsedimage.FromPublic(publicImage)
+
+	logrus.Debugf("GetSignaturesWithAcceptedAuthor for image %s", policyIdentityLogName(image.Reference()))
+	reqs := pc.requirementsForImageRef(image.Reference())
+
+	// FIXME: Use image.UntrustedSignatures, use that to improve error messages (needs tests!)
+	unverifiedSignatures, err := image.Signatures(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	res := make([]*Signature, 0, len(unverifiedSignatures))
+	for sigNumber, sig := range unverifiedSignatures {
+		var acceptedSig *Signature // non-nil if accepted
+		rejected := false
+		// FIXME? Say more about the contents of the signature, i.e. parse it even before verification?!
+		logrus.Debugf("Evaluating signature %d:", sigNumber)
+	interpretingReqs:
+		for reqNumber, req := range reqs {
+			// FIXME: Log the requirement itself? For now, we use just the number.
+			// FIXME: supply state
+			switch res, as, err := req.isSignatureAuthorAccepted(ctx, image, sig); res {
+			case sarAccepted:
+				if as == nil { // Coverage: this should never happen
+					logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but no parsed contents", reqNumber)
+					rejected = true
+					break interpretingReqs
+				}
+				logrus.Debugf(" Requirement %d: signature accepted", reqNumber)
+				if acceptedSig == nil {
+					acceptedSig = as
+				} else if *as != *acceptedSig { // Coverage: this should never happen
+					// Huh?! Two ways of verifying the same signature blob resulted in two different parses of its already accepted contents?
+ logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but different parsed contents", reqNumber) + rejected = true + acceptedSig = nil + break interpretingReqs + } + case sarRejected: + logrus.Debugf(" Requirement %d: signature rejected: %s", reqNumber, err.Error()) + rejected = true + break interpretingReqs + case sarUnknown: + if err != nil { // Coverage: this should never happen + logrus.Debugf(" Requirement %d: internal inconsistency: sarUnknown but an error message %s", reqNumber, err.Error()) + rejected = true + break interpretingReqs + } + logrus.Debugf(" Requirement %d: signature state unknown, continuing", reqNumber) + default: // Coverage: this should never happen + logrus.Debugf(" Requirement %d: internal inconsistency: unknown result %#v", reqNumber, string(res)) + rejected = true + break interpretingReqs + } + } + // This also handles the (invalid) case of empty reqs, by rejecting the signature. + if acceptedSig != nil && !rejected { + logrus.Debugf(" Overall: OK, signature accepted") + res = append(res, acceptedSig) + } else { + logrus.Debugf(" Overall: Signature not accepted") + } + } + return res, nil +} + +// IsRunningImageAllowed returns true iff the policy allows running the image. +// If it returns false, err must be non-nil, and should be an PolicyRequirementError if evaluation +// succeeded but the result was rejection. +// WARNING: This validates signatures and the manifest, but does not download or validate the +// layers. Users must validate that the layers match their expected digests. +func (pc *PolicyContext) IsRunningImageAllowed(ctx context.Context, publicImage types.UnparsedImage) (res bool, finalErr error) { + if err := pc.changeState(pcReady, pcInUse); err != nil { + return false, err + } + defer func() { + if err := pc.changeState(pcInUse, pcReady); err != nil { + res = false + finalErr = err + } + }() + + image := unparsedimage.FromPublic(publicImage) + + logrus.Debugf("IsRunningImageAllowed for image %s", policyIdentityLogName(image.Reference())) + reqs := pc.requirementsForImageRef(image.Reference()) + + if len(reqs) == 0 { + return false, PolicyRequirementError("List of verification policy requirements must not be empty") + } + + for reqNumber, req := range reqs { + // FIXME: supply state + allowed, err := req.isRunningImageAllowed(ctx, image) + if !allowed { + logrus.Debugf("Requirement %d: denied, done", reqNumber) + return false, err + } + logrus.Debugf(" Requirement %d: allowed", reqNumber) + } + // We have tested that len(reqs) != 0, so at least one req must have explicitly allowed this image. + logrus.Debugf("Overall: allowed") + return true, nil +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go b/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go new file mode 100644 index 0000000000..a8bc013010 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go @@ -0,0 +1,20 @@ +// Policy evaluation for prSignedBaseLayer. + +package signature + +import ( + "context" + + "github.com/containers/image/v5/internal/private" + "github.com/sirupsen/logrus" +) + +func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + return sarUnknown, nil, nil +} + +func (pr *prSignedBaseLayer) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) { + // FIXME? Reject this at policy parsing time already? 
+ logrus.Errorf("signedBaseLayer not implemented yet!") + return false, PolicyRequirementError("signedBaseLayer not implemented yet!") +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go new file mode 100644 index 0000000000..e5c9329185 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go @@ -0,0 +1,124 @@ +// Policy evaluation for prSignedBy. + +package signature + +import ( + "context" + "errors" + "fmt" + "slices" + + "github.com/containers/image/v5/internal/multierr" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/manifest" + digest "github.com/opencontainers/go-digest" +) + +func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + switch pr.KeyType { + case SBKeyTypeGPGKeys: + case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs: + // FIXME? Reject this at policy parsing time already? + return sarRejected, nil, fmt.Errorf(`Unimplemented "keyType" value %q`, string(pr.KeyType)) + default: + // This should never happen, newPRSignedBy ensures KeyType.IsValid() + return sarRejected, nil, fmt.Errorf(`Unknown "keyType" value %q`, string(pr.KeyType)) + } + + // FIXME: move this to per-context initialization + const notOneSourceErrorText = `Internal inconsistency: not exactly one of "keyPath", "keyPaths" and "keyData" specified` + data, err := loadBytesFromConfigSources(configBytesSources{ + inconsistencyErrorMessage: notOneSourceErrorText, + path: pr.KeyPath, + paths: pr.KeyPaths, + data: pr.KeyData, + }) + if err != nil { + return sarRejected, nil, err + } + if data == nil { + return sarRejected, nil, errors.New(notOneSourceErrorText) + } + + // FIXME: move this to per-context initialization + mech, trustedIdentities, err := newEphemeralGPGSigningMechanism(data) + if err != nil { + return sarRejected, nil, err + } + defer mech.Close() + if len(trustedIdentities) == 0 { + return sarRejected, nil, PolicyRequirementError("No public keys imported") + } + + signature, err := verifyAndExtractSignature(mech, sig, signatureAcceptanceRules{ + validateKeyIdentity: func(keyIdentity string) error { + if slices.Contains(trustedIdentities, keyIdentity) { + return nil + } + // Coverage: We use a private GPG home directory and only import trusted keys, so this should + // not be reachable. 
+ return PolicyRequirementError(fmt.Sprintf("Signature by key %s is not accepted", keyIdentity)) + }, + validateSignedDockerReference: func(ref string) error { + if !pr.SignedIdentity.matchesDockerReference(image, ref) { + return PolicyRequirementError(fmt.Sprintf("Signature for identity %q is not accepted", ref)) + } + return nil + }, + validateSignedDockerManifestDigest: func(digest digest.Digest) error { + m, _, err := image.Manifest(ctx) + if err != nil { + return err + } + digestMatches, err := manifest.MatchesDigest(m, digest) + if err != nil { + return err + } + if !digestMatches { + return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest)) + } + return nil + }, + }) + if err != nil { + return sarRejected, nil, err + } + + return sarAccepted, signature, nil +} + +func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) { + // FIXME: Use image.UntrustedSignatures, use that to improve error messages + // (needs tests!) + sigs, err := image.Signatures(ctx) + if err != nil { + return false, err + } + var rejections []error + for _, s := range sigs { + var reason error + switch res, _, err := pr.isSignatureAuthorAccepted(ctx, image, s); res { + case sarAccepted: + // One accepted signature is enough. + return true, nil + case sarRejected: + reason = err + case sarUnknown: + // Huh?! This should not happen at all; treat it as any other invalid value. + fallthrough + default: + reason = fmt.Errorf(`Internal error: Unexpected signature verification result %q`, string(res)) + } + rejections = append(rejections, reason) + } + var summary error + switch len(rejections) { + case 0: + summary = PolicyRequirementError("A signature was required, but no signature exists") + case 1: + summary = rejections[0] + default: + summary = PolicyRequirementError(multierr.Format("None of the signatures were accepted, reasons: ", "; ", "", rejections).Error()) + } + return false, summary +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go b/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go new file mode 100644 index 0000000000..faede787a1 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go @@ -0,0 +1,435 @@ +// Policy evaluation for prSigstoreSigned. + +package signature + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/x509" + "errors" + "fmt" + "os" + "strings" + + "github.com/containers/image/v5/internal/multierr" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/signature/internal" + digest "github.com/opencontainers/go-digest" + "github.com/sigstore/sigstore/pkg/cryptoutils" +) + +// configBytesSources contains configuration fields which may result in one or more []byte values +type configBytesSources struct { + inconsistencyErrorMessage string // Error to return if more than one source is set + path string // …Path: a path to a file containing the data, or "" + paths []string // …Paths: paths to files containing the data, or nil + data []byte // …Data: a single instance ofhe raw data, or nil + datas [][]byte // …Datas: the raw data, or nil // codespell:ignore datas +} + +// loadBytesFromConfigSources ensures at most one of the sources in src is set, +// and returns the referenced data, or nil if neither is set. 
+func loadBytesFromConfigSources(src configBytesSources) ([][]byte, error) {
+	sources := 0
+	var data [][]byte // = nil
+	if src.path != "" {
+		sources++
+		d, err := os.ReadFile(src.path)
+		if err != nil {
+			return nil, err
+		}
+		data = [][]byte{d}
+	}
+	if src.paths != nil {
+		sources++
+		data = [][]byte{}
+		for _, path := range src.paths {
+			d, err := os.ReadFile(path)
+			if err != nil {
+				return nil, err
+			}
+			data = append(data, d)
+		}
+	}
+	if src.data != nil {
+		sources++
+		data = [][]byte{src.data}
+	}
+	if src.datas != nil { // codespell:ignore datas
+		sources++
+		data = src.datas // codespell:ignore datas
+	}
+	if sources > 1 {
+		return nil, errors.New(src.inconsistencyErrorMessage)
+	}
+	return data, nil
+}
+
+// prepareTrustRoot creates a fulcioTrustRoot from the input data.
+// (This also prevents external implementations of this interface, ensuring that prSigstoreSignedFulcio is the only one.)
+func (f *prSigstoreSignedFulcio) prepareTrustRoot() (*fulcioTrustRoot, error) {
+	caCertPEMs, err := loadBytesFromConfigSources(configBytesSources{
+		inconsistencyErrorMessage: `Internal inconsistency: both "caPath" and "caData" specified`,
+		path:                      f.CAPath,
+		data:                      f.CAData,
+	})
+	if err != nil {
+		return nil, err
+	}
+	if len(caCertPEMs) != 1 {
+		return nil, errors.New(`Internal inconsistency: Fulcio specified with not exactly one of "caPath" and "caData"`)
+	}
+	certs := x509.NewCertPool()
+	if ok := certs.AppendCertsFromPEM(caCertPEMs[0]); !ok {
+		return nil, errors.New("error loading Fulcio CA certificates")
+	}
+	fulcio := fulcioTrustRoot{
+		caCertificates: certs,
+		oidcIssuer:     f.OIDCIssuer,
+		subjectEmail:   f.SubjectEmail,
+	}
+	if err := fulcio.validate(); err != nil {
+		return nil, err
+	}
+	return &fulcio, nil
+}
+
+// prepareTrustRoot creates a pkiTrustRoot from the input data.
+// (This also prevents external implementations of this interface, ensuring that prSigstoreSignedPKI is the only one.)
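// A construction sketch for the corresponding policy requirement (the path and
// subject are hypothetical; the options live in policy_config_sigstore.go):
//
//	pki, err := NewPRSigstoreSignedPKI(
//		PRSigstoreSignedPKIWithCARootsPath("/etc/containers/pki-roots.pem"),
//		PRSigstoreSignedPKIWithSubjectEmail("release@example.com"),
//	)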
+func (p *prSigstoreSignedPKI) prepareTrustRoot() (*pkiTrustRoot, error) {
+	caRootsCertPEMs, err := loadBytesFromConfigSources(configBytesSources{
+		inconsistencyErrorMessage: `Internal inconsistency: both "caRootsPath" and "caRootsData" specified`,
+		path:                      p.CARootsPath,
+		data:                      p.CARootsData,
+	})
+	if err != nil {
+		return nil, err
+	}
+	if len(caRootsCertPEMs) != 1 {
+		return nil, errors.New(`Internal inconsistency: PKI specified with not exactly one of "caRootsPath" and "caRootsData"`)
+	}
+	rootsCerts := x509.NewCertPool()
+	if ok := rootsCerts.AppendCertsFromPEM(caRootsCertPEMs[0]); !ok {
+		return nil, errors.New("error loading PKI CA Roots certificates")
+	}
+	pki := pkiTrustRoot{
+		caRootsCertificates: rootsCerts,
+		subjectEmail:        p.SubjectEmail,
+		subjectHostname:     p.SubjectHostname,
+	}
+	caIntermediateCertPEMs, err := loadBytesFromConfigSources(configBytesSources{
+		inconsistencyErrorMessage: `Internal inconsistency: both "caIntermediatesPath" and "caIntermediatesData" specified`,
+		path:                      p.CAIntermediatesPath,
+		data:                      p.CAIntermediatesData,
+	})
+	if err != nil {
+		return nil, err
+	}
+	if caIntermediateCertPEMs != nil {
+		if len(caIntermediateCertPEMs) != 1 {
+			return nil, errors.New(`Internal inconsistency: PKI specified with invalid value from "caIntermediatesPath" or "caIntermediatesData"`)
+		}
+		intermediatePool := x509.NewCertPool()
+		trustedIntermediates, err := cryptoutils.UnmarshalCertificatesFromPEM(caIntermediateCertPEMs[0])
+		if err != nil {
+			return nil, internal.NewInvalidSignatureError(fmt.Sprintf("loading trusted intermediate certificates: %v", err))
+		}
+		for _, trustedIntermediateCert := range trustedIntermediates {
+			intermediatePool.AddCert(trustedIntermediateCert)
+		}
+		pki.caIntermediateCertificates = intermediatePool
+	}
+
+	if err := pki.validate(); err != nil {
+		return nil, err
+	}
+	return &pki, nil
+}
+
+// sigstoreSignedTrustRoot contains an already parsed version of the prSigstoreSigned policy
+type sigstoreSignedTrustRoot struct {
+	publicKeys      []crypto.PublicKey
+	fulcio          *fulcioTrustRoot
+	rekorPublicKeys []*ecdsa.PublicKey
+	pki             *pkiTrustRoot
+}
+
+func (pr *prSigstoreSigned) prepareTrustRoot() (*sigstoreSignedTrustRoot, error) {
+	res := sigstoreSignedTrustRoot{}
+
+	publicKeyPEMs, err := loadBytesFromConfigSources(configBytesSources{
+		inconsistencyErrorMessage: `Internal inconsistency: more than one of "keyPath", "keyPaths", "keyData", "keyDatas" specified`,
+		path:                      pr.KeyPath,
+		paths:                     pr.KeyPaths,
+		data:                      pr.KeyData,
+		datas:                     pr.KeyDatas, // codespell:ignore datas
+	})
+	if err != nil {
+		return nil, err
+	}
+	if publicKeyPEMs != nil {
+		for index, keyData := range publicKeyPEMs {
+			pk, err := cryptoutils.UnmarshalPEMToPublicKey(keyData)
+			if err != nil {
+				return nil, fmt.Errorf("parsing public key %d: %w", index+1, err)
+			}
+			res.publicKeys = append(res.publicKeys, pk)
+		}
+		if len(res.publicKeys) == 0 {
+			return nil, errors.New(`Internal inconsistency: "keyPath", "keyPaths", "keyData" and "keyDatas" produced no public keys`)
+		}
+	}
+
+	if pr.Fulcio != nil {
+		f, err := pr.Fulcio.prepareTrustRoot()
+		if err != nil {
+			return nil, err
+		}
+		res.fulcio = f
+	}
+
+	rekorPublicKeyPEMs, err := loadBytesFromConfigSources(configBytesSources{
+		inconsistencyErrorMessage: `Internal inconsistency: more than one of "rekorPublicKeyPath", "rekorPublicKeyPaths", "rekorPublicKeyData", "rekorPublicKeyDatas" specified`,
+		path:                      pr.RekorPublicKeyPath,
+		paths:                     pr.RekorPublicKeyPaths,
+		data:                      pr.RekorPublicKeyData,
+		datas:                     pr.RekorPublicKeyDatas, // codespell:ignore datas
+	})
+	if err != nil {
+		return nil,
err + } + if rekorPublicKeyPEMs != nil { + for index, pem := range rekorPublicKeyPEMs { + pk, err := cryptoutils.UnmarshalPEMToPublicKey(pem) + if err != nil { + return nil, fmt.Errorf("parsing Rekor public key %d: %w", index+1, err) + } + pkECDSA, ok := pk.(*ecdsa.PublicKey) + if !ok { + return nil, fmt.Errorf("Rekor public key %d is not using ECDSA", index+1) + + } + res.rekorPublicKeys = append(res.rekorPublicKeys, pkECDSA) + } + if len(res.rekorPublicKeys) == 0 { + return nil, errors.New(`Internal inconsistency: "rekorPublicKeyPath", "rekorPublicKeyPaths", "rekorPublicKeyData" and "rekorPublicKeyDatas" produced no public keys`) + } + } + + if pr.PKI != nil { + p, err := pr.PKI.prepareTrustRoot() + if err != nil { + return nil, err + } + res.pki = p + } + + return &res, nil +} + +func (pr *prSigstoreSigned) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + // We don’t know of a single user of this API, and we might return unexpected values in Signature. + // For now, just punt. + return sarRejected, nil, errors.New("isSignatureAuthorAccepted is not implemented for sigstore") +} + +func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image private.UnparsedImage, sig signature.Sigstore) (signatureAcceptanceResult, error) { + // FIXME: move this to per-context initialization + trustRoot, err := pr.prepareTrustRoot() + if err != nil { + return sarRejected, err + } + + untrustedAnnotations := sig.UntrustedAnnotations() + untrustedBase64Signature, ok := untrustedAnnotations[signature.SigstoreSignatureAnnotationKey] + if !ok { + return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSignatureAnnotationKey) + } + untrustedPayload := sig.UntrustedPayload() + + keySources := 0 + if trustRoot.publicKeys != nil { + keySources++ + } + if trustRoot.fulcio != nil { + keySources++ + } + if trustRoot.pki != nil { + keySources++ + } + + var publicKeys []crypto.PublicKey + switch { + case keySources > 1: // newPRSigstoreSigned rejects more than one key sources. + return sarRejected, errors.New("Internal inconsistency: More than one of public key, Fulcio, or PKI specified") + case keySources == 0: // newPRSigstoreSigned rejects empty key sources. + return sarRejected, errors.New("Internal inconsistency: A public key, Fulcio, or PKI must be specified.") + case trustRoot.publicKeys != nil: + if trustRoot.rekorPublicKeys != nil { + untrustedSET, ok := untrustedAnnotations[signature.SigstoreSETAnnotationKey] + if !ok { // For user convenience; passing an empty []byte to VerifyRekorSet should work. + return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSETAnnotationKey) + } + + var rekorFailures []string + for _, candidatePublicKey := range trustRoot.publicKeys { + // We could use publicKeyPEM directly, but let’s re-marshal to avoid inconsistencies. + // FIXME: We could just generate DER instead of the full PEM text + recreatedPublicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(candidatePublicKey) + if err != nil { + // Coverage: The key was loaded from a PEM format, so it’s unclear how this could fail. + // (PEM is not essential, MarshalPublicKeyToPEM can only fail if marshaling to ASN1.DER fails.) + return sarRejected, fmt.Errorf("re-marshaling public key to PEM: %w", err) + } + // We don’t care about the Rekor timestamp, just about log presence. 
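				// (Each candidate from keyPath/keyPaths/keyData/keyDatas is tried in
				// turn; the first key whose re-marshaled PEM the SET verifies against
				// is kept as the effective signing key.)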
+ _, err = internal.VerifyRekorSET(trustRoot.rekorPublicKeys, []byte(untrustedSET), recreatedPublicKeyPEM, untrustedBase64Signature, untrustedPayload) + if err == nil { + publicKeys = append(publicKeys, candidatePublicKey) + break // The SET can only accept one public key entry, so if we found one, the rest either doesn’t match or is a duplicate + } + rekorFailures = append(rekorFailures, err.Error()) + } + if len(publicKeys) == 0 { + if len(rekorFailures) == 0 { + // Coverage: We have ensured that len(trustRoot.publicKeys) != 0, when nothing succeeds, there must be at least one failure. + return sarRejected, errors.New(`Internal inconsistency: Rekor SET did not match any key but we have no failures.`) + } + return sarRejected, internal.NewInvalidSignatureError(fmt.Sprintf("No public key verified against the RekorSET: %s", strings.Join(rekorFailures, ", "))) + } + } else { + publicKeys = trustRoot.publicKeys + } + + case trustRoot.fulcio != nil: + if trustRoot.rekorPublicKeys == nil { // newPRSigstoreSigned rejects such combinations. + return sarRejected, errors.New("Internal inconsistency: Fulcio CA specified without a Rekor public key") + } + untrustedSET, ok := untrustedAnnotations[signature.SigstoreSETAnnotationKey] + if !ok { // For user convenience; passing an empty []byte to VerifyRekorSet should correctly reject it anyway. + return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSETAnnotationKey) + } + untrustedCert, ok := untrustedAnnotations[signature.SigstoreCertificateAnnotationKey] + if !ok { // For user convenience; passing an empty []byte to VerifyRekorSet should correctly reject it anyway. + return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreCertificateAnnotationKey) + } + var untrustedIntermediateChainBytes []byte + if untrustedIntermediateChain, ok := untrustedAnnotations[signature.SigstoreIntermediateCertificateChainAnnotationKey]; ok { + untrustedIntermediateChainBytes = []byte(untrustedIntermediateChain) + } + pk, err := verifyRekorFulcio(trustRoot.rekorPublicKeys, trustRoot.fulcio, + []byte(untrustedSET), []byte(untrustedCert), untrustedIntermediateChainBytes, untrustedBase64Signature, untrustedPayload) + if err != nil { + return sarRejected, err + } + publicKeys = []crypto.PublicKey{pk} + + case trustRoot.pki != nil: + if trustRoot.rekorPublicKeys != nil { // newPRSigstoreSigned rejects such combinations. + return sarRejected, errors.New("Internal inconsistency: PKI specified with a Rekor public key") + } + untrustedCert, ok := untrustedAnnotations[signature.SigstoreCertificateAnnotationKey] + if !ok { + return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreCertificateAnnotationKey) + } + var untrustedIntermediateChainBytes []byte + if untrustedIntermediateChain, ok := untrustedAnnotations[signature.SigstoreIntermediateCertificateChainAnnotationKey]; ok { + untrustedIntermediateChainBytes = []byte(untrustedIntermediateChain) + } + pk, err := verifyPKI(trustRoot.pki, []byte(untrustedCert), untrustedIntermediateChainBytes) + if err != nil { + return sarRejected, err + } + publicKeys = []crypto.PublicKey{pk} + } + + if len(publicKeys) == 0 { + // Coverage: This should never happen, we ensured that trustRoot.publicKeys is non-empty if set, + // and we have already excluded the possibility in the switch above. 
+ return sarRejected, fmt.Errorf("Internal inconsistency: publicKey not set before verifying sigstore payload") + } + signature, err := internal.VerifySigstorePayload(publicKeys, untrustedPayload, untrustedBase64Signature, internal.SigstorePayloadAcceptanceRules{ + ValidateSignedDockerReference: func(ref string) error { + if !pr.SignedIdentity.matchesDockerReference(image, ref) { + return PolicyRequirementError(fmt.Sprintf("Signature for identity %q is not accepted", ref)) + } + return nil + }, + ValidateSignedDockerManifestDigest: func(digest digest.Digest) error { + m, _, err := image.Manifest(ctx) + if err != nil { + return err + } + digestMatches, err := manifest.MatchesDigest(m, digest) + if err != nil { + return err + } + if !digestMatches { + return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest)) + } + return nil + }, + }) + if err != nil { + return sarRejected, err + } + if signature == nil { // A paranoid sanity check that VerifySigstorePayload has returned consistent values + return sarRejected, errors.New("internal error: VerifySigstorePayload succeeded but returned no data") // Coverage: This should never happen. + } + + return sarAccepted, nil +} + +func (pr *prSigstoreSigned) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) { + sigs, err := image.UntrustedSignatures(ctx) + if err != nil { + return false, err + } + var rejections []error + foundNonSigstoreSignatures := 0 + foundSigstoreNonAttachments := 0 + for _, s := range sigs { + sigstoreSig, ok := s.(signature.Sigstore) + if !ok { + foundNonSigstoreSignatures++ + continue + } + if sigstoreSig.UntrustedMIMEType() != signature.SigstoreSignatureMIMEType { + foundSigstoreNonAttachments++ + continue + } + + var reason error + switch res, err := pr.isSignatureAccepted(ctx, image, sigstoreSig); res { + case sarAccepted: + // One accepted signature is enough. + return true, nil + case sarRejected: + reason = err + case sarUnknown: + // Huh?! This should not happen at all; treat it as any other invalid value. + fallthrough + default: + reason = fmt.Errorf(`Internal error: Unexpected signature verification result %q`, string(res)) + } + rejections = append(rejections, reason) + } + var summary error + switch len(rejections) { + case 0: + if foundNonSigstoreSignatures == 0 && foundSigstoreNonAttachments == 0 { + // A nice message for the most common case. + summary = PolicyRequirementError("A signature was required, but no signature exists") + } else { + summary = PolicyRequirementError(fmt.Sprintf("A signature was required, but no signature exists (%d non-sigstore signatures, %d sigstore non-signature attachments)", + foundNonSigstoreSignatures, foundSigstoreNonAttachments)) + } + case 1: + summary = rejections[0] + default: + summary = PolicyRequirementError(multierr.Format("None of the signatures were accepted, reasons: ", "; ", "", rejections).Error()) + } + return false, summary +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go b/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go new file mode 100644 index 0000000000..031866f0dc --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go @@ -0,0 +1,29 @@ +// Policy evaluation for the various simple PolicyRequirement types. 
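// Schematically, these requirement types appear in policy.json as entries like
// the following hypothetical fragment (reject everything by default, allow one
// docker namespace):
//
//	{"default": [{"type": "reject"}],
//	 "transports": {"docker": {"docker.io/library": [{"type": "insecureAcceptAnything"}]}}}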
+ +package signature + +import ( + "context" + "fmt" + + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/transports" +) + +func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + // prInsecureAcceptAnything semantics: Every image is allowed to run, + // but this does not consider the signature as verified. + return sarUnknown, nil, nil +} + +func (pr *prInsecureAcceptAnything) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) { + return true, nil +} + +func (pr *prReject) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + return sarRejected, nil, PolicyRequirementError(fmt.Sprintf("Any signatures for image %s are rejected by policy.", transports.ImageName(image.Reference()))) +} + +func (pr *prReject) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) { + return false, PolicyRequirementError(fmt.Sprintf("Running image %s is rejected by policy.", transports.ImageName(image.Reference()))) +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_paths_common.go b/vendor/github.com/containers/image/v5/signature/policy_paths_common.go new file mode 100644 index 0000000000..038351cb74 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_paths_common.go @@ -0,0 +1,7 @@ +//go:build !freebsd + +package signature + +// builtinDefaultPolicyPath is the policy path used for DefaultPolicy(). +// DO NOT change this, instead see systemDefaultPolicyPath above. +const builtinDefaultPolicyPath = "/etc/containers/policy.json" diff --git a/vendor/github.com/containers/image/v5/signature/policy_paths_freebsd.go b/vendor/github.com/containers/image/v5/signature/policy_paths_freebsd.go new file mode 100644 index 0000000000..6a45a78fa1 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_paths_freebsd.go @@ -0,0 +1,7 @@ +//go:build freebsd + +package signature + +// builtinDefaultPolicyPath is the policy path used for DefaultPolicy(). +// DO NOT change this, instead see systemDefaultPolicyPath above. +const builtinDefaultPolicyPath = "/usr/local/etc/containers/policy.json" diff --git a/vendor/github.com/containers/image/v5/signature/policy_reference_match.go b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go new file mode 100644 index 0000000000..390957b02b --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go @@ -0,0 +1,154 @@ +// PolicyReferenceMatch implementations. + +package signature + +import ( + "fmt" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/transports" +) + +// parseImageAndDockerReference converts an image and a reference string into two parsed entities, failing on any error and handling unidentified images. 
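// As a worked example for the matchers below (hypothetical references): under
// prmMatchRepoDigestOrExact, an image pulled as "example.com/app@sha256:<digest>"
// is accepted with a signature naming "example.com/app:v1" (same repository, the
// signature carries a tag), whereas prmMatchExact rejects that pair because the
// two strings are not identical.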
+func parseImageAndDockerReference(image private.UnparsedImage, s2 string) (reference.Named, reference.Named, error) { + r1 := image.Reference().DockerReference() + if r1 == nil { + return nil, nil, PolicyRequirementError(fmt.Sprintf("Docker reference match attempted on image %s with no known Docker reference identity", + transports.ImageName(image.Reference()))) + } + r2, err := reference.ParseNormalizedNamed(s2) + if err != nil { + return nil, nil, err + } + return r1, r2, nil +} + +func (prm *prmMatchExact) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool { + intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference) + if err != nil { + return false + } + // Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now. + if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) { + return false + } + return signature.String() == intended.String() +} + +// matchRepoDigestOrExactReferenceValues implements prmMatchRepoDigestOrExact.matchesDockerReference +// using reference.Named values. +func matchRepoDigestOrExactReferenceValues(intended, signature reference.Named) bool { + // Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now. + if reference.IsNameOnly(signature) { + return false + } + switch intended.(type) { + case reference.NamedTagged: // Includes the case when intended has both a tag and a digest. + return signature.String() == intended.String() + case reference.Canonical: + // We don’t actually compare the manifest digest against the signature here; that happens in prSignedBy, via UnparsedImage.Manifest.
+ // Because UnparsedImage.Manifest verifies the intended.Digest() against the manifest, and prSignedBy verifies the signature digest against the manifest, + // we know that signature digest matches intended.Digest() (but intended.Digest() and signature digest may use different algorithms) + return signature.Name() == intended.Name() + default: // !reference.IsNameOnly(intended) + return false + } +} +func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool { + intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference) + if err != nil { + return false + } + return matchRepoDigestOrExactReferenceValues(intended, signature) +} + +func (prm *prmMatchRepository) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool { + intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference) + if err != nil { + return false + } + return signature.Name() == intended.Name() +} + +// parseDockerReferences converts two reference strings into parsed entities, failing on any error +func parseDockerReferences(s1, s2 string) (reference.Named, reference.Named, error) { + r1, err := reference.ParseNormalizedNamed(s1) + if err != nil { + return nil, nil, err + } + r2, err := reference.ParseNormalizedNamed(s2) + if err != nil { + return nil, nil, err + } + return r1, r2, nil +} + +func (prm *prmExactReference) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool { + intended, signature, err := parseDockerReferences(prm.DockerReference, signatureDockerReference) + if err != nil { + return false + } + // prm.DockerReference and signatureDockerReference should be exact; so, verify that now. + if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) { + return false + } + return signature.String() == intended.String() +} + +func (prm *prmExactRepository) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool { + intended, signature, err := parseDockerReferences(prm.DockerRepository, signatureDockerReference) + if err != nil { + return false + } + return signature.Name() == intended.Name() +} + +// refMatchesPrefix returns true if ref matches prm.Prefix. +func (prm *prmRemapIdentity) refMatchesPrefix(ref reference.Named) bool { + name := ref.Name() + switch { + case len(name) < len(prm.Prefix): + return false + case len(name) == len(prm.Prefix): + return name == prm.Prefix + case len(name) > len(prm.Prefix): + // We are matching only ref.Name(), not ref.String(), so the only separator we are + // expecting is '/': + // - '@' is only valid to separate a digest, i.e. not a part of ref.Name() + // - similarly ':' to mark a tag would not be a part of ref.Name(); it can be a part of a + // host:port domain syntax, but we don't treat that specially and require an exact match + // of the domain. + return strings.HasPrefix(name, prm.Prefix) && name[len(prm.Prefix)] == '/' + default: + panic("Internal error: impossible comparison outcome") + } +} + +// remapReferencePrefix returns the result of remapping ref, if it matches prm.Prefix +// or the original ref if it does not. 
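+// For example (hypothetical values): with Prefix = "internal.example.com/mirror" and
+// SignedPrefix = "registry.example.com", the reference
+// "internal.example.com/mirror/ns/app:v1" is rewritten to
+// "registry.example.com/ns/app:v1" before matching.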
+func (prm *prmRemapIdentity) remapReferencePrefix(ref reference.Named) (reference.Named, error) { + if !prm.refMatchesPrefix(ref) { + return ref, nil + } + refString := ref.String() + newNamedRef := strings.Replace(refString, prm.Prefix, prm.SignedPrefix, 1) + newParsedRef, err := reference.ParseNamed(newNamedRef) + if err != nil { + return nil, fmt.Errorf(`error rewriting reference from %q to %q: %w`, refString, newNamedRef, err) + } + return newParsedRef, nil +} + +func (prm *prmRemapIdentity) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool { + intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference) + if err != nil { + return false + } + intended, err = prm.remapReferencePrefix(intended) + if err != nil { + return false + } + return matchRepoDigestOrExactReferenceValues(intended, signature) +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_types.go b/vendor/github.com/containers/image/v5/signature/policy_types.go new file mode 100644 index 0000000000..2d107ed450 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_types.go @@ -0,0 +1,253 @@ +// Note: Consider the API unstable until the code supports at least three different image formats or transports. + +// This defines types used to represent a signature verification policy in memory. +// Do not use the private types directly; either parse a configuration file, or construct a Policy from PolicyRequirements +// built using the constructor functions provided in policy_config.go. + +package signature + +// NOTE: Keep this in sync with docs/containers-policy.json.5.md! + +// Policy defines requirements for considering a signature, or an image, valid. +type Policy struct { + // Default applies to any image which does not have a matching policy in Transports. + // Note that this can happen even if a matching PolicyTransportScopes exists in Transports + // if the image matches none of the scopes. + Default PolicyRequirements `json:"default"` + Transports map[string]PolicyTransportScopes `json:"transports"` +} + +// PolicyTransportScopes defines policies for images for a specific transport, +// for various scopes, the map keys. +// Scopes are defined by the transport (types.ImageReference.PolicyConfigurationIdentity etc.); +// there is one scope precisely matching a single image, and namespace scopes as prefixes +// of the single-image scope. (e.g. hostname[/zero[/or[/more[/namespaces[/individualimage]]]]]) +// The empty scope, if it exists, is considered a parent namespace of all other scopes. +// Most specific scope wins, duplication is prohibited (hard failure). +type PolicyTransportScopes map[string]PolicyRequirements + +// PolicyRequirements is a set of requirements applying to a set of images; each of them must be satisfied (though perhaps each by a different signature). +// Must not be empty, frequently will only contain a single element. +type PolicyRequirements []PolicyRequirement + +// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image. +// The type is public, but its definition is private. + +// prCommon is the common type field in a JSON encoding of PolicyRequirement. +type prCommon struct { + Type prTypeIdentifier `json:"type"` +} + +// prTypeIdentifier is a string designating a kind of a PolicyRequirement.
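+// These identifiers appear as the value of the "type" field in policy.json entries;
+// a hedged fragment for illustration (path hypothetical):
+//
+//	{"type": "signedBy", "keyType": "GPGKeys", "keyPath": "/etc/pki/containers/example.gpg"}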
+type prTypeIdentifier string + +const ( + prTypeInsecureAcceptAnything prTypeIdentifier = "insecureAcceptAnything" + prTypeReject prTypeIdentifier = "reject" + prTypeSignedBy prTypeIdentifier = "signedBy" + prTypeSignedBaseLayer prTypeIdentifier = "signedBaseLayer" + prTypeSigstoreSigned prTypeIdentifier = "sigstoreSigned" +) + +// prInsecureAcceptAnything is a PolicyRequirement with type = prTypeInsecureAcceptAnything: +// every image is allowed to run. +// Note that because PolicyRequirements are implicitly ANDed, this is necessary only if it is the only rule (to make the list non-empty and the policy explicit). +// NOTE: This allows the image to run; it DOES NOT consider the signature verified (per IsSignatureAuthorAccepted). +// FIXME? Better name? +type prInsecureAcceptAnything struct { + prCommon +} + +// prReject is a PolicyRequirement with type = prTypeReject: every image is rejected. +type prReject struct { + prCommon +} + +// prSignedBy is a PolicyRequirement with type = prTypeSignedBy: the image is signed by trusted keys for a specified identity +type prSignedBy struct { + prCommon + + // KeyType specifies what kind of key reference KeyPath/KeyPaths/KeyData is. + // Acceptable values are “GPGKeys” | “signedByGPGKeys” | “X.509Certificates” | “signedByX.509CAs” + // FIXME: eventually also support GPGTOFU, X.509TOFU, with KeyPath only + KeyType sbKeyType `json:"keyType"` + + // KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified. + KeyPath string `json:"keyPath,omitempty"` + // KeyPaths is a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified. + KeyPaths []string `json:"keyPaths,omitempty"` + // KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath, KeyPaths and KeyData must be specified. + KeyData []byte `json:"keyData,omitempty"` + + // SignedIdentity specifies what image identity the signature must be claiming about the image. + // Defaults to "matchRepoDigestOrExact" if not specified. + SignedIdentity PolicyReferenceMatch `json:"signedIdentity"` +} + +// sbKeyType are the allowed values for prSignedBy.KeyType +type sbKeyType string + +const ( + // SBKeyTypeGPGKeys refers to keys contained in a GPG keyring + SBKeyTypeGPGKeys sbKeyType = "GPGKeys" + // SBKeyTypeSignedByGPGKeys refers to keys signed by keys in a GPG keyring + SBKeyTypeSignedByGPGKeys sbKeyType = "signedByGPGKeys" + // SBKeyTypeX509Certificates refers to keys in a set of X.509 certificates + // FIXME: PEM, DER? + SBKeyTypeX509Certificates sbKeyType = "X509Certificates" + // SBKeyTypeSignedByX509CAs refers to keys signed by one of the X.509 CAs + // FIXME: PEM, DER? + SBKeyTypeSignedByX509CAs sbKeyType = "signedByX509CAs" +) + +// prSignedBaseLayer is a PolicyRequirement with type = prTypeSignedBaseLayer: the image has a specified, correctly signed, base image. +type prSignedBaseLayer struct { + prCommon + // BaseLayerIdentity specifies the base image to look for. "matchExact" is rejected, "matchRepository" is unlikely to be useful. + BaseLayerIdentity PolicyReferenceMatch `json:"baseLayerIdentity"` +} + +// prSigstoreSigned is a PolicyRequirement with type = prTypeSigstoreSigned: the image is signed by trusted keys for a specified identity +type prSigstoreSigned struct { + prCommon + + // KeyPath is a pathname to a local file containing the trusted key. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified.
+ KeyPath string `json:"keyPath,omitempty"` + // KeyPaths is a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified. + KeyPaths []string `json:"keyPaths,omitempty"` + // KeyData contains the trusted key, base64-encoded. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified. + KeyData []byte `json:"keyData,omitempty"` + // KeyDatas is a set of trusted keys, base64-encoded. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified. + KeyDatas [][]byte `json:"keyDatas,omitempty"` + + // Fulcio specifies which Fulcio-generated certificates are accepted. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified. + // If Fulcio is specified, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData or RekorPublicKeyDatas must be specified as well. + Fulcio PRSigstoreSignedFulcio `json:"fulcio,omitempty"` + + // RekorPublicKeyPath is a pathname to a local file containing a public key of a Rekor server which must record acceptable signatures. + // If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well; + // otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified). + RekorPublicKeyPath string `json:"rekorPublicKeyPath,omitempty"` + // RekorPublicKeyPaths is a set of pathnames to local files, each containing a public key of a Rekor server. One of the keys must record acceptable signatures. + // If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well; + // otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified). + RekorPublicKeyPaths []string `json:"rekorPublicKeyPaths,omitempty"` + // RekorPublicKeyData contains a base64-encoded public key of a Rekor server which must record acceptable signatures. + // If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well; + // otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified). + RekorPublicKeyData []byte `json:"rekorPublicKeyData,omitempty"` + // RekorPublicKeyDatas each contain a base64-encoded public key of a Rekor server. One of the keys must record acceptable signatures. + // If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well; + // otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified). + RekorPublicKeyDatas [][]byte `json:"rekorPublicKeyDatas,omitempty"` + + // PKI specifies which PKI-generated certificates are accepted. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified. + PKI PRSigstoreSignedPKI `json:"pki,omitempty"` + + // SignedIdentity specifies what image identity the signature must be claiming about the image. + // Defaults to "matchRepoDigestOrExact" if not specified. + // Note that /usr/bin/cosign interoperability might require using repo-only matching. + SignedIdentity PolicyReferenceMatch `json:"signedIdentity"` +} + +// PRSigstoreSignedFulcio contains Fulcio configuration options for a "sigstoreSigned" PolicyRequirement. +// This is a public type with a single private implementation.
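+// A hedged policy.json fragment using Fulcio with Rekor (all paths and values illustrative):
+//
+//	{"type": "sigstoreSigned",
+//	 "fulcio": {"caPath": "/etc/pki/fulcio_v1.crt.pem", "oidcIssuer": "https://oidc.example.com", "subjectEmail": "dev@example.com"},
+//	 "rekorPublicKeyPath": "/etc/pki/rekor.pub"}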
+type PRSigstoreSignedFulcio interface { + // prepareTrustRoot creates a fulcioTrustRoot from the input data. + // (This also prevents external implementations of this interface, ensuring that prSigstoreSignedFulcio is the only one.) + prepareTrustRoot() (*fulcioTrustRoot, error) +} + +// prSigstoreSignedFulcio collects Fulcio configuration options for prSigstoreSigned +type prSigstoreSignedFulcio struct { + // CAPath is a path to a file containing accepted CA root certificates, in PEM format. Exactly one of CAPath and CAData must be specified. + CAPath string `json:"caPath,omitempty"` + // CAData contains accepted CA root certificates in PEM format, all of that base64-encoded. Exactly one of CAPath and CAData must be specified. + CAData []byte `json:"caData,omitempty"` + // OIDCIssuer specifies the expected OIDC issuer, recorded by Fulcio into the generated certificates. + OIDCIssuer string `json:"oidcIssuer,omitempty"` + // SubjectEmail specifies the expected email address of the authenticated OIDC identity, recorded by Fulcio into the generated certificates. + SubjectEmail string `json:"subjectEmail,omitempty"` +} + +// PRSigstoreSignedPKI contains PKI configuration options for a "sigstoreSigned" PolicyRequirement. +type PRSigstoreSignedPKI interface { + // prepareTrustRoot creates a pkiTrustRoot from the input data. + // (This also prevents external implementations of this interface, ensuring that prSigstoreSignedPKI is the only one.) + prepareTrustRoot() (*pkiTrustRoot, error) +} + +// prSigstoreSignedPKI contains non-fulcio certificate PKI configuration options for prSigstoreSigned +type prSigstoreSignedPKI struct { + // CARootsPath is a path to a file containing accepted CA root certificates, in PEM format. Exactly one of CARootsPath and CARootsData must be specified. + CARootsPath string `json:"caRootsPath,omitempty"` + // CARootsData contains accepted CA root certificates in PEM format, all of that base64-encoded. Exactly one of CARootsPath and CARootsData must be specified. + CARootsData []byte `json:"caRootsData,omitempty"` + // CAIntermediatesPath is a path to a file containing accepted CA intermediate certificates, in PEM format. Only one of CAIntermediatesPath or CAIntermediatesData can be specified, not both. + CAIntermediatesPath string `json:"caIntermediatesPath,omitempty"` + // CAIntermediatesData contains accepted CA intermediate certificates in PEM format, all of that base64-encoded. Only one of CAIntermediatesPath or CAIntermediatesData can be specified, not both. + CAIntermediatesData []byte `json:"caIntermediatesData,omitempty"` + + // SubjectEmail specifies the expected email address imposed on the subject to which the certificate was issued. At least one of SubjectEmail and SubjectHostname must be specified. + SubjectEmail string `json:"subjectEmail,omitempty"` + // SubjectHostname specifies the expected hostname imposed on the subject to which the certificate was issued. At least one of SubjectEmail and SubjectHostname must be specified. + SubjectHostname string `json:"subjectHostname,omitempty"` +} + +// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement. +// The type is public, but its implementation is private. + +// prmCommon is the common type field in a JSON encoding of PolicyReferenceMatch. +type prmCommon struct { + Type prmTypeIdentifier `json:"type"` +} + +// prmTypeIdentifier is a string designating a kind of a PolicyReferenceMatch.
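+// For example (hedged illustration): {"type": "exactRepository", "dockerRepository": "registry.example.com/ns/app"}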
+type prmTypeIdentifier string + +const ( + prmTypeMatchExact prmTypeIdentifier = "matchExact" + prmTypeMatchRepoDigestOrExact prmTypeIdentifier = "matchRepoDigestOrExact" + prmTypeMatchRepository prmTypeIdentifier = "matchRepository" + prmTypeExactReference prmTypeIdentifier = "exactReference" + prmTypeExactRepository prmTypeIdentifier = "exactRepository" + prmTypeRemapIdentity prmTypeIdentifier = "remapIdentity" +) + +// prmMatchExact is a PolicyReferenceMatch with type = prmMatchExact: the two references must match exactly. +type prmMatchExact struct { + prmCommon +} + +// prmMatchRepoDigestOrExact is a PolicyReferenceMatch with type = prmMatchRepoDigestOrExact: the two references must match exactly, +// except that digest references are also accepted if the repository name matches (regardless of tag/digest) and the signature applies to the referenced digest +type prmMatchRepoDigestOrExact struct { + prmCommon +} + +// prmMatchRepository is a PolicyReferenceMatch with type = prmMatchRepository: the two references must use the same repository, may differ in the tag. +type prmMatchRepository struct { + prmCommon +} + +// prmExactReference is a PolicyReferenceMatch with type = prmExactReference: matches a specified reference exactly. +type prmExactReference struct { + prmCommon + DockerReference string `json:"dockerReference"` +} + +// prmExactRepository is a PolicyReferenceMatch with type = prmExactRepository: matches a specified repository, with any tag. +type prmExactRepository struct { + prmCommon + DockerRepository string `json:"dockerRepository"` +} + +// prmRemapIdentity is a PolicyReferenceMatch with type = prmRemapIdentity: like prmMatchRepoDigestOrExact, +// except that a namespace (at least a host:port, at most a single repository) is substituted before matching the two references. +type prmRemapIdentity struct { + prmCommon + Prefix string `json:"prefix"` + SignedPrefix string `json:"signedPrefix"` + // Possibly let the users make a choice for tag/digest matching behavior + // similar to prmMatchExact/prmMatchRepository? +} diff --git a/vendor/github.com/containers/image/v5/signature/signer/signer.go b/vendor/github.com/containers/image/v5/signature/signer/signer.go new file mode 100644 index 0000000000..73ae550aa5 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/signer/signer.go @@ -0,0 +1,9 @@ +package signer + +import "github.com/containers/image/v5/internal/signer" + +// Signer is an object, possibly carrying state, that can be used by copy.Image to sign one or more container images. +// It can only be created from within the containers/image package; it can’t be implemented externally. +// +// The owner of a Signer must call Close() when done. +type Signer = signer.Signer diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/copied.go b/vendor/github.com/containers/image/v5/signature/sigstore/copied.go new file mode 100644 index 0000000000..2e510f60e3 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/sigstore/copied.go @@ -0,0 +1,103 @@ +package sigstore + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + + "github.com/secure-systems-lab/go-securesystemslib/encrypted" + "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/signature" +) + +// The following code was copied from github.com/sigstore. +// FIXME: Eliminate that duplication. + +// Copyright 2021 The Sigstore Authors.
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const ( + // from sigstore/cosign/pkg/cosign.CosignPrivateKeyPemType. + cosignPrivateKeyPemType = "ENCRYPTED COSIGN PRIVATE KEY" + // from sigstore/cosign/pkg/cosign.SigstorePrivateKeyPemType. + sigstorePrivateKeyPemType = "ENCRYPTED SIGSTORE PRIVATE KEY" +) + +// from sigstore/cosign/pkg/cosign.loadPrivateKey +// FIXME: Do we need all of these key formats? +func loadPrivateKey(key []byte, pass []byte) (signature.SignerVerifier, error) { + // Decrypt first + p, _ := pem.Decode(key) + if p == nil { + return nil, errors.New("invalid pem block") + } + if p.Type != sigstorePrivateKeyPemType && p.Type != cosignPrivateKeyPemType { + return nil, fmt.Errorf("unsupported pem type: %s", p.Type) + } + + x509Encoded, err := encrypted.Decrypt(p.Bytes, pass) + if err != nil { + return nil, fmt.Errorf("decrypt: %w", err) + } + + pk, err := x509.ParsePKCS8PrivateKey(x509Encoded) + if err != nil { + return nil, fmt.Errorf("parsing private key: %w", err) + } + switch pk := pk.(type) { + case *rsa.PrivateKey: + return signature.LoadRSAPKCS1v15SignerVerifier(pk, crypto.SHA256) + case *ecdsa.PrivateKey: + return signature.LoadECDSASignerVerifier(pk, crypto.SHA256) + case ed25519.PrivateKey: + return signature.LoadED25519SignerVerifier(pk) + default: + return nil, errors.New("unsupported key type") + } +} + +// simplified from sigstore/cosign/pkg/cosign.marshalKeyPair +// loadPrivateKey always requires encryption, so this always requires a passphrase. +func marshalKeyPair(privateKey crypto.PrivateKey, publicKey crypto.PublicKey, password []byte) (_privateKey []byte, _publicKey []byte, err error) { + x509Encoded, err := x509.MarshalPKCS8PrivateKey(privateKey) + if err != nil { + return nil, nil, fmt.Errorf("x509 encoding private key: %w", err) + } + + encBytes, err := encrypted.Encrypt(x509Encoded, password) + if err != nil { + return nil, nil, err + } + + // store in PEM format + privBytes := pem.EncodeToMemory(&pem.Block{ + Bytes: encBytes, + // Use the older “COSIGN” type name; as of 2023-03-30 cosign’s main branch generates “SIGSTORE” types, + // but a version of cosign that can accept them has not yet been released. + Type: cosignPrivateKeyPemType, + }) + + // Now do the public key + pubBytes, err := cryptoutils.MarshalPublicKeyToPEM(publicKey) + if err != nil { + return nil, nil, err + } + + return privBytes, pubBytes, nil +} diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/generate.go b/vendor/github.com/containers/image/v5/signature/sigstore/generate.go new file mode 100644 index 0000000000..77520c1232 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/sigstore/generate.go @@ -0,0 +1,35 @@ +package sigstore + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" +) + +// GenerateKeyPairResult is a struct to ensure the private and public parts cannot be confused by the caller.
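+// A hedged usage sketch of the GenerateKeyPair function defined below (passphrase
+// and file names illustrative; error handling elided):
+//
+//	res, _ := GenerateKeyPair([]byte("example-passphrase"))
+//	_ = os.WriteFile("sigstore.key", res.PrivateKey, 0o600)
+//	_ = os.WriteFile("sigstore.pub", res.PublicKey, 0o644)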
+type GenerateKeyPairResult struct { + PublicKey []byte + PrivateKey []byte +} + +// GenerateKeyPair generates a public/private key pair usable for signing images using the sigstore format, +// and returns key representations suitable for storing in long-term files (with the private key encrypted using the provided passphrase). +// The specific key kind (e.g. algorithm, size), as well as the file format, are unspecified by this API, +// and can change with best practices over time. +func GenerateKeyPair(passphrase []byte) (*GenerateKeyPairResult, error) { + // https://github.com/sigstore/cosign/blob/main/specs/SIGNATURE_SPEC.md#signature-schemes + // only requires ECDSA-P256 to be supported, so that’s what we must use. + rawKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + // Coverage: This can fail only if the randomness source fails + return nil, err + } + private, public, err := marshalKeyPair(rawKey, rawKey.Public(), passphrase) + if err != nil { + return nil, err + } + return &GenerateKeyPairResult{ + PublicKey: public, + PrivateKey: private, + }, nil +} diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/internal/signer.go b/vendor/github.com/containers/image/v5/signature/sigstore/internal/signer.go new file mode 100644 index 0000000000..c6258f408f --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/sigstore/internal/signer.go @@ -0,0 +1,95 @@ +package internal + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/signature/internal" + sigstoreSignature "github.com/sigstore/sigstore/pkg/signature" +) + +type Option func(*SigstoreSigner) error + +// SigstoreSigner is a signer.SignerImplementation implementation for sigstore signatures. +// It is initialized using various closures that implement Option, sadly over several subpackages, to decrease the +// dependency impact. +type SigstoreSigner struct { + PrivateKey sigstoreSignature.Signer // May be nil during initialization + SigningKeyOrCert []byte // For possible Rekor upload; always initialized together with PrivateKey + + // Fulcio results to include + FulcioGeneratedCertificate []byte // Or nil + FulcioGeneratedCertificateChain []byte // Or nil + + // Rekor state + RekorUploader func(ctx context.Context, keyOrCertBytes []byte, signatureBytes []byte, payloadBytes []byte) ([]byte, error) // Or nil +} + +// ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature. +func (s *SigstoreSigner) ProgressMessage() string { + return "Signing image using a sigstore signature" +} + +// SignImageManifest creates a new signature for manifest m as dockerReference. +func (s *SigstoreSigner) SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (signature.Signature, error) { + if s.PrivateKey == nil { + return nil, errors.New("internal error: nothing to sign with, should have been detected in NewSigner") + } + + if reference.IsNameOnly(dockerReference) { + return nil, fmt.Errorf("reference %s can’t be signed, it has neither a tag nor a digest", dockerReference.String()) + } + manifestDigest, err := manifest.Digest(m) + if err != nil { + return nil, err + } + // sigstore/cosign completely ignores dockerReference for actual policy decisions. 
+ // They record the repo (but NOT THE TAG) in the value; without the tag we can’t detect version rollbacks. + // So, just do what simple signing does, and cosign won’t mind. + payloadData := internal.NewUntrustedSigstorePayload(manifestDigest, dockerReference.String()) + payloadBytes, err := json.Marshal(payloadData) + if err != nil { + return nil, err + } + + // github.com/sigstore/cosign/internal/pkg/cosign.payloadSigner uses signatureoptions.WithContext(), + // which seems to be not used by anything. So we don’t bother. + signatureBytes, err := s.PrivateKey.SignMessage(bytes.NewReader(payloadBytes)) + if err != nil { + return nil, fmt.Errorf("creating signature: %w", err) + } + base64Signature := base64.StdEncoding.EncodeToString(signatureBytes) + var rekorSETBytes []byte // = nil + if s.RekorUploader != nil { + set, err := s.RekorUploader(ctx, s.SigningKeyOrCert, signatureBytes, payloadBytes) + if err != nil { + return nil, err + } + rekorSETBytes = set + } + + annotations := map[string]string{ + signature.SigstoreSignatureAnnotationKey: base64Signature, + } + if s.FulcioGeneratedCertificate != nil { + annotations[signature.SigstoreCertificateAnnotationKey] = string(s.FulcioGeneratedCertificate) + } + if s.FulcioGeneratedCertificateChain != nil { + annotations[signature.SigstoreIntermediateCertificateChainAnnotationKey] = string(s.FulcioGeneratedCertificateChain) + } + if rekorSETBytes != nil { + annotations[signature.SigstoreSETAnnotationKey] = string(rekorSETBytes) + } + return signature.SigstoreFromComponents(signature.SigstoreSignatureMIMEType, payloadBytes, annotations), nil +} + +func (s *SigstoreSigner) Close() error { + return nil +} diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/signer.go b/vendor/github.com/containers/image/v5/signature/sigstore/signer.go new file mode 100644 index 0000000000..fb825ada9d --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/sigstore/signer.go @@ -0,0 +1,60 @@ +package sigstore + +import ( + "errors" + "fmt" + "os" + + internalSigner "github.com/containers/image/v5/internal/signer" + "github.com/containers/image/v5/signature/signer" + "github.com/containers/image/v5/signature/sigstore/internal" + "github.com/sigstore/sigstore/pkg/cryptoutils" +) + +type Option = internal.Option + +func WithPrivateKeyFile(file string, passphrase []byte) Option { + return func(s *internal.SigstoreSigner) error { + if s.PrivateKey != nil { + return fmt.Errorf("multiple private key sources specified when preparing to create sigstore signatures") + } + + if passphrase == nil { + return errors.New("private key passphrase not provided") + } + + privateKeyPEM, err := os.ReadFile(file) + if err != nil { + return fmt.Errorf("reading private key from %s: %w", file, err) + } + signerVerifier, err := loadPrivateKey(privateKeyPEM, passphrase) + if err != nil { + return fmt.Errorf("initializing private key: %w", err) + } + publicKey, err := signerVerifier.PublicKey() + if err != nil { + return fmt.Errorf("getting public key from private key: %w", err) + } + publicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(publicKey) + if err != nil { + return fmt.Errorf("converting public key to PEM: %w", err) + } + s.PrivateKey = signerVerifier + s.SigningKeyOrCert = publicKeyPEM + return nil + } +} + +func NewSigner(opts ...Option) (*signer.Signer, error) { + s := internal.SigstoreSigner{} + for _, o := range opts { + if err := o(&s); err != nil { + return nil, err + } + } + if s.PrivateKey == nil { + return nil, errors.New("no private key source 
provided (neither a private key nor Fulcio) when preparing to create sigstore signatures") + } + + return internalSigner.NewSigner(&s), nil +} diff --git a/vendor/github.com/containers/image/v5/signature/simple.go b/vendor/github.com/containers/image/v5/signature/simple.go new file mode 100644 index 0000000000..94a8465930 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/simple.go @@ -0,0 +1,281 @@ +// Note: Consider the API unstable until the code supports at least three different image formats or transports. + +// NOTE: Keep this in sync with docs/atomic-signature.md and docs/atomic-signature-embedded.json! + +package signature + +import ( + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/containers/image/v5/signature/internal" + "github.com/containers/image/v5/version" + digest "github.com/opencontainers/go-digest" +) + +const ( + signatureType = "atomic container signature" +) + +// InvalidSignatureError is returned when parsing an invalid signature. +type InvalidSignatureError = internal.InvalidSignatureError + +// Signature is the parsed content of a signature. +// The only way to get this structure from a blob should be as a return value from a successful call to verifyAndExtractSignature below. +type Signature struct { + DockerManifestDigest digest.Digest + DockerReference string // FIXME: more precise type? +} + +// untrustedSignature is the parsed content of a signature. +type untrustedSignature struct { + untrustedDockerManifestDigest digest.Digest + untrustedDockerReference string // FIXME: more precise type? + untrustedCreatorID *string + // This is intentionally an int64; the native JSON float64 type would allow representing _some_ sub-second precision, + // but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds). + // So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually, + // we would add another field, UntrustedTimestampNS int64. + untrustedTimestamp *int64 +} + +// UntrustedSignatureInformation is information available in an untrusted signature. +// This may be useful when debugging signature verification failures, +// or when managing a set of signatures on a single image. +// +// WARNING: Do not use the contents of this for ANY security decisions, +// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable. +// There is NO REASON to expect the values to be correct, or not intentionally misleading +// (including things like “✅ Verified by $authority”) +type UntrustedSignatureInformation struct { + UntrustedDockerManifestDigest digest.Digest + UntrustedDockerReference string // FIXME: more precise type? + UntrustedCreatorID *string + UntrustedTimestamp *time.Time + UntrustedShortKeyIdentifier string +} + +// newUntrustedSignature returns an untrustedSignature object with +// the specified primary contents and appropriate metadata. +func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference string) untrustedSignature { + // Use intermediate variables for these values so that we can take their addresses. + // Golang guarantees that they will have a new address on every execution.
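+ // (untrustedCreatorID and untrustedTimestamp are pointer fields, so that absent optional values can be nil; the intermediate variables below give us something addressable to point at.)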
+ creatorID := "atomic " + version.Version + timestamp := time.Now().Unix() + return untrustedSignature{ + untrustedDockerManifestDigest: dockerManifestDigest, + untrustedDockerReference: dockerReference, + untrustedCreatorID: &creatorID, + untrustedTimestamp: &timestamp, + } +} + +// A compile-time check that untrustedSignature and *untrustedSignature implement json.Marshaler +var _ json.Marshaler = untrustedSignature{} +var _ json.Marshaler = (*untrustedSignature)(nil) + +// MarshalJSON implements the json.Marshaler interface. +func (s untrustedSignature) MarshalJSON() ([]byte, error) { + if s.untrustedDockerManifestDigest == "" || s.untrustedDockerReference == "" { + return nil, errors.New("Unexpected empty signature content") + } + critical := map[string]any{ + "type": signatureType, + "image": map[string]string{"docker-manifest-digest": s.untrustedDockerManifestDigest.String()}, + "identity": map[string]string{"docker-reference": s.untrustedDockerReference}, + } + optional := map[string]any{} + if s.untrustedCreatorID != nil { + optional["creator"] = *s.untrustedCreatorID + } + if s.untrustedTimestamp != nil { + optional["timestamp"] = *s.untrustedTimestamp + } + signature := map[string]any{ + "critical": critical, + "optional": optional, + } + return json.Marshal(signature) } + +// Compile-time check that *untrustedSignature implements json.Unmarshaler +var _ json.Unmarshaler = (*untrustedSignature)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface +func (s *untrustedSignature) UnmarshalJSON(data []byte) error { + return internal.JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data)) +} + +// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal.JSONFormatError error type. +// Splitting it into a separate function allows us to do the internal.JSONFormatError → InvalidSignatureError conversion in a single place, the caller.
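+// For reference, a well-formed payload parsed here looks like (hedged example; digest and reference values are illustrative):
+//
+//	{"critical":{"type":"atomic container signature","image":{"docker-manifest-digest":"sha256:…"},"identity":{"docker-reference":"example.com/ns/app:latest"}},"optional":{"creator":"atomic 5.35.0","timestamp":1700000000}}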
+func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error { + var critical, optional json.RawMessage + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "critical": &critical, + "optional": &optional, + }); err != nil { + return err + } + + var creatorID string + var timestamp float64 + var gotCreatorID, gotTimestamp = false, false + if err := internal.ParanoidUnmarshalJSONObject(optional, func(key string) any { + switch key { + case "creator": + gotCreatorID = true + return &creatorID + case "timestamp": + gotTimestamp = true + return &timestamp + default: + var ignore any + return &ignore + } + }); err != nil { + return err + } + if gotCreatorID { + s.untrustedCreatorID = &creatorID + } + if gotTimestamp { + intTimestamp := int64(timestamp) + if float64(intTimestamp) != timestamp { + return internal.NewInvalidSignatureError("Field optional.timestamp is not an integer") + } + s.untrustedTimestamp = &intTimestamp + } + + var t string + var image, identity json.RawMessage + if err := internal.ParanoidUnmarshalJSONObjectExactFields(critical, map[string]any{ + "type": &t, + "image": &image, + "identity": &identity, + }); err != nil { + return err + } + if t != signatureType { + return internal.NewInvalidSignatureError(fmt.Sprintf("Unrecognized signature type %s", t)) + } + + var digestString string + if err := internal.ParanoidUnmarshalJSONObjectExactFields(image, map[string]any{ + "docker-manifest-digest": &digestString, + }); err != nil { + return err + } + digestValue, err := digest.Parse(digestString) + if err != nil { + return internal.NewInvalidSignatureError(fmt.Sprintf(`invalid docker-manifest-digest value %q: %v`, digestString, err)) + } + s.untrustedDockerManifestDigest = digestValue + + return internal.ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{ + "docker-reference": &s.untrustedDockerReference, + }) +} + +// Sign formats the signature and returns a blob signed using mech and keyIdentity +// (If it seems surprising that this is a method on untrustedSignature, note that there +// isn’t a good reason to think that a key used by the user is trusted by any component +// of the system just because it is a private key — actually the presence of a private key +// on the system increases the likelihood of a successful attack on that private key +// on that particular system.) +func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string, passphrase string) ([]byte, error) { + json, err := json.Marshal(s) + if err != nil { + return nil, err + } + + if newMech, ok := mech.(signingMechanismWithPassphrase); ok { + return newMech.SignWithPassphrase(json, keyIdentity, passphrase) + } + + if passphrase != "" { + return nil, errors.New("signing mechanism does not support passphrases") + } + + return mech.Sign(json, keyIdentity) +} + +// signatureAcceptanceRules specifies how to decide whether an untrusted signature is acceptable. +// We centralize the actual parsing and data extraction in verifyAndExtractSignature; this supplies +// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature +// because the functions have the same or similar types, so there is a risk of exchanging the functions; +// named members of this struct are more explicit.
+type signatureAcceptanceRules struct { + validateKeyIdentity func(string) error + validateSignedDockerReference func(string) error + validateSignedDockerManifestDigest func(digest.Digest) error +} + +// verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principal components +// match expected values, both as specified by rules, and returns it +func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte, rules signatureAcceptanceRules) (*Signature, error) { + signed, keyIdentity, err := mech.Verify(unverifiedSignature) + if err != nil { + return nil, err + } + if err := rules.validateKeyIdentity(keyIdentity); err != nil { + return nil, err + } + + var unmatchedSignature untrustedSignature + if err := json.Unmarshal(signed, &unmatchedSignature); err != nil { + return nil, internal.NewInvalidSignatureError(err.Error()) + } + if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.untrustedDockerManifestDigest); err != nil { + return nil, err + } + if err := rules.validateSignedDockerReference(unmatchedSignature.untrustedDockerReference); err != nil { + return nil, err + } + // signatureAcceptanceRules have accepted this value. + return &Signature{ + DockerManifestDigest: unmatchedSignature.untrustedDockerManifestDigest, + DockerReference: unmatchedSignature.untrustedDockerReference, + }, nil +} + +// GetUntrustedSignatureInformationWithoutVerifying extracts information available in an untrusted signature, +// WITHOUT doing any cryptographic verification. +// This may be useful when debugging signature verification failures, +// or when managing a set of signatures on a single image. +// +// WARNING: Do not use the contents of this for ANY security decisions, +// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable. +// There is NO REASON to expect the values to be correct, or not intentionally misleading +// (including things like “✅ Verified by $authority”) +func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []byte) (*UntrustedSignatureInformation, error) { + // NOTE: This should eventually do format autodetection.
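+ // (An ephemeral mechanism over an empty keyring is sufficient here: UntrustedSignatureContents only extracts the payload and the short key identifier, it performs no verification.)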
+ mech, _, err := NewEphemeralGPGSigningMechanism([]byte{}) + if err != nil { + return nil, err + } + defer mech.Close() + + untrustedContents, shortKeyIdentifier, err := mech.UntrustedSignatureContents(untrustedSignatureBytes) + if err != nil { + return nil, err + } + var untrustedDecodedContents untrustedSignature + if err := json.Unmarshal(untrustedContents, &untrustedDecodedContents); err != nil { + return nil, internal.NewInvalidSignatureError(err.Error()) + } + + var timestamp *time.Time // = nil + if untrustedDecodedContents.untrustedTimestamp != nil { + ts := time.Unix(*untrustedDecodedContents.untrustedTimestamp, 0) + timestamp = &ts + } + return &UntrustedSignatureInformation{ + UntrustedDockerManifestDigest: untrustedDecodedContents.untrustedDockerManifestDigest, + UntrustedDockerReference: untrustedDecodedContents.untrustedDockerReference, + UntrustedCreatorID: untrustedDecodedContents.untrustedCreatorID, + UntrustedTimestamp: timestamp, + UntrustedShortKeyIdentifier: shortKeyIdentifier, + }, nil +} diff --git a/vendor/github.com/containers/image/v5/signature/simplesigning/signer.go b/vendor/github.com/containers/image/v5/signature/simplesigning/signer.go new file mode 100644 index 0000000000..983bbb10b5 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/simplesigning/signer.go @@ -0,0 +1,105 @@ +package simplesigning + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/containers/image/v5/docker/reference" + internalSig "github.com/containers/image/v5/internal/signature" + internalSigner "github.com/containers/image/v5/internal/signer" + "github.com/containers/image/v5/signature" + "github.com/containers/image/v5/signature/signer" +) + +// simpleSigner is a signer.SignerImplementation implementation for simple signing signatures. +type simpleSigner struct { + mech signature.SigningMechanism + keyFingerprint string + passphrase string // "" if not provided. +} + +type Option func(*simpleSigner) error + +// WithKeyFingerprint returns an Option for NewSigner, specifying a key to sign with, using the provided GPG key fingerprint. +func WithKeyFingerprint(keyFingerprint string) Option { + return func(s *simpleSigner) error { + s.keyFingerprint = keyFingerprint + return nil + } +} + +// WithPassphrase returns an Option for NewSigner, specifying a passphrase for the private key. +// If this is not specified, the system may interactively prompt using a gpg-agent / pinentry. +func WithPassphrase(passphrase string) Option { + return func(s *simpleSigner) error { + // The gpgme implementation can’t use passphrase with \n; reject it here for consistent behavior. + if strings.Contains(passphrase, "\n") { + return errors.New("invalid passphrase: must not contain a line break") + } + s.passphrase = passphrase + return nil + } +} + +// NewSigner returns a signature.Signer which creates “simple signing” signatures using the user’s default +// GPG configuration ($GNUPGHOME / ~/.gnupg). +// +// The set of options must identify a key to sign with, probably using a WithKeyFingerprint. +// +// The caller must call Close() on the returned Signer. 
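+//
+// A hedged usage sketch (the fingerprint value is illustrative only):
+//
+//	s, err := NewSigner(WithKeyFingerprint("0123456789ABCDEF0123456789ABCDEF01234567"))
+//	if err != nil { /* handle error */ }
+//	defer s.Close()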
+func NewSigner(opts ...Option) (*signer.Signer, error) { + mech, err := signature.NewGPGSigningMechanism() + if err != nil { + return nil, fmt.Errorf("initializing GPG: %w", err) + } + succeeded := false + defer func() { + if !succeeded { + mech.Close() + } + }() + if err := mech.SupportsSigning(); err != nil { + return nil, fmt.Errorf("Signing not supported: %w", err) + } + + s := simpleSigner{ + mech: mech, + } + for _, o := range opts { + if err := o(&s); err != nil { + return nil, err + } + } + if s.keyFingerprint == "" { + return nil, errors.New("no key identity provided for simple signing") + } + // Ideally, we should look up (and unlock?) the key at this point already, but our current SigningMechanism API does not allow that. + + succeeded = true + return internalSigner.NewSigner(&s), nil +} + +// ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature. +func (s *simpleSigner) ProgressMessage() string { + return "Signing image using simple signing" +} + +// SignImageManifest creates a new signature for manifest m as dockerReference. +func (s *simpleSigner) SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (internalSig.Signature, error) { + if reference.IsNameOnly(dockerReference) { + return nil, fmt.Errorf("reference %s can’t be signed, it has neither a tag nor a digest", dockerReference.String()) + } + simpleSig, err := signature.SignDockerManifestWithOptions(m, dockerReference.String(), s.mech, s.keyFingerprint, &signature.SignOptions{ + Passphrase: s.passphrase, + }) + if err != nil { + return nil, err + } + return internalSig.SimpleSigningFromBlob(simpleSig), nil +} + +func (s *simpleSigner) Close() error { + return s.mech.Close() +} diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go index 9a7a0da2bb..9883bde6d2 100644 --- a/vendor/github.com/containers/image/v5/types/types.go +++ b/vendor/github.com/containers/image/v5/types/types.go @@ -3,6 +3,7 @@ package types import ( "context" "io" + "net/url" "time" "github.com/containers/image/v5/docker/reference" @@ -241,6 +242,7 @@ type BlobInfoCache interface { // // WARNING: Various methods which return an object identified by digest generally do not // validate that the returned data actually matches that digest; this is the caller’s responsibility. +// See the individual methods’ documentation for potentially more details. type ImageSource interface { // Reference returns the reference used to set up this source, _as specified by the user_ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. @@ -251,10 +253,17 @@ type ImageSource interface { // It may use a remote (= slow) service. // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). + // + // WARNING: This is a raw access to the data as provided by the source; if the reference contains a digest, or instanceDigest is set, + // callers must enforce the digest match themselves, typically by using image.UnparsedInstance to access the manifest instead + // of calling this directly. (Compare the generic warning applicable to all of the [ImageSource] interface.) 
GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. + // + // WARNING: This is a raw access to the data as provided by the source; callers must validate the contents + // against the blob’s digest themselves. (Compare the generic warning applicable to all of the [ImageSource] interface.) GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error) // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. HasThreadSafeGetBlob() bool @@ -655,6 +664,8 @@ type SystemContext struct { // Note that this requires writing blobs to temporary files, and takes more time than the default behavior, // when the digest for a blob is unknown. DockerRegistryPushPrecomputeDigests bool + // DockerProxyURL specifies proxy configuration schema (like socks5://username:password@ip:port) + DockerProxyURL *url.URL // === docker/daemon.Transport overrides === // A directory containing a CA certificate (ending with ".crt"), diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index 6c0cc885d5..e9d548a855 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -6,9 +6,9 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 34 + VersionMinor = 35 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 2 + VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. 
VersionDev = "" diff --git a/vendor/github.com/containers/ocicrypt/.gitignore b/vendor/github.com/containers/ocicrypt/.gitignore new file mode 100644 index 0000000000..b25c15b81f --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/.gitignore @@ -0,0 +1 @@ +*~ diff --git a/vendor/github.com/containers/ocicrypt/.golangci.yml b/vendor/github.com/containers/ocicrypt/.golangci.yml new file mode 100644 index 0000000000..bf39af8368 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/.golangci.yml @@ -0,0 +1,35 @@ +linters: + enable: + - depguard + - staticcheck + - unconvert + - gofmt + - goimports + - revive + - ineffassign + - govet + - unused + - misspell + +linters-settings: + depguard: + rules: + main: + files: + - $all + deny: + - pkg: "io/ioutil" + + revive: + severity: error + rules: + - name: indent-error-flow + severity: warning + disabled: false + + - name: error-strings + disabled: false + + staticcheck: + # Suppress reports of deprecated packages + checks: ["-SA1019"] diff --git a/vendor/github.com/containers/ocicrypt/ADOPTERS.md b/vendor/github.com/containers/ocicrypt/ADOPTERS.md new file mode 100644 index 0000000000..fa4b03bb88 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/ADOPTERS.md @@ -0,0 +1,10 @@ +Below are list of adopters of the `ocicrypt` library or supports use of OCI encrypted images: +- [skopeo](https://github.com/containers/skopeo) +- [buildah](https://github.com/containers/buildah) +- [containerd](https://github.com/containerd/imgcrypt) +- [nerdctl](https://github.com/containerd/nerdctl) +- [distribution](https://github.com/distribution/distribution) + +Below are the list of projects that are in the process of adopting support: +- [quay](https://github.com/quay/quay) +- [kata-containers](https://github.com/kata-containers/kata-containers) diff --git a/vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md b/vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md new file mode 100644 index 0000000000..d68f8dbda4 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md @@ -0,0 +1,3 @@ +## The OCIcrypt Library Project Community Code of Conduct + +The OCIcrypt Library project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md). diff --git a/vendor/github.com/containers/ocicrypt/MAINTAINERS b/vendor/github.com/containers/ocicrypt/MAINTAINERS new file mode 100644 index 0000000000..af38d03bff --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/MAINTAINERS @@ -0,0 +1,6 @@ +# ocicrypt maintainers +# +# Github ID, Name, Email Address +lumjjb, Brandon Lum, lumjjb@gmail.com +stefanberger, Stefan Berger, stefanb@linux.ibm.com +arronwy, Arron Wang, arron.wang@intel.com diff --git a/vendor/github.com/containers/ocicrypt/Makefile b/vendor/github.com/containers/ocicrypt/Makefile new file mode 100644 index 0000000000..97ddeefbb9 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/Makefile @@ -0,0 +1,35 @@ +# Copyright The containerd Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +.PHONY: check build decoder generate-protobuf + +all: build + +FORCE: + +check: + golangci-lint run + +build: vendor + go build ./... + +vendor: + go mod tidy + +test: + go clean -testcache + go test ./... -test.v + +generate-protobuf: + protoc -I utils/keyprovider/ utils/keyprovider/keyprovider.proto --go_out=plugins=grpc:utils/keyprovider diff --git a/vendor/github.com/containers/ocicrypt/README.md b/vendor/github.com/containers/ocicrypt/README.md new file mode 100644 index 0000000000..b69d14e3b8 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/README.md @@ -0,0 +1,50 @@ +# OCIcrypt Library + +The `ocicrypt` library is the OCI image spec implementation of container image encryption. More details of the spec can be seen in the [OCI repository](https://github.com/opencontainers/image-spec/pull/775). The purpose of this library is to encode spec structures and consts in code, as well as provide a consistent implementation of image encryption across container runtimes and build tools. + +Consumers of OCIcrypt: + +- [containerd/imgcrypt](https://github.com/containerd/imgcrypt) +- [cri-o](https://github.com/cri-o/cri-o) +- [skopeo](https://github.com/containers/skopeo) + + +## Usage + +There are various levels of usage for this library. The main consumers would be runtime/build tools, and a more specific use is the ability to extend its cryptographic functionality. + +### Runtime/Build tool usage + +The general exposed interface a runtime/build tool would use is to perform encryption or decryption of layers: + +``` +package "github.com/containers/ocicrypt" +func EncryptLayer(ec *config.EncryptConfig, encOrPlainLayerReader io.Reader, desc ocispec.Descriptor) (io.Reader, EncryptLayerFinalizer, error) +func DecryptLayer(dc *config.DecryptConfig, encLayerReader io.Reader, desc ocispec.Descriptor, unwrapOnly bool) (io.Reader, digest.Digest, error) +``` + +The settings/parameters to these functions can be specified via creation of an encryption config with the `github.com/containers/ocicrypt/config` package. We note that because setting of annotations and other fields of the layer descriptor is done through various means in different runtimes/build tools, it is the responsibility of the caller to still ensure that the layer descriptor follows the OCI specification (i.e. encoding, setting annotations, etc.). + + +### Crypto Agility and Extensibility + +The implementations of both symmetric and asymmetric encryption used in this library are behind two main interfaces, which users can extend if need be. These are in the following packages: +- github.com/containers/ocicrypt/blockcipher - LayerBlockCipher interface for block ciphers +- github.com/containers/ocicrypt/keywrap - KeyWrapper interface for key wrapping + +We note that adding interfaces here outside the OCI spec is risky and not recommended, except for very specialized and confined use cases. Please open an issue or PR if there is a general use case that could be added to the OCI spec. + + +#### Keyprovider interface + +As part of the keywrap interface, there is a [keyprovider](https://github.com/containers/ocicrypt/blob/main/docs/keyprovider.md) implementation that allows one to call out to a binary or service. + + +## Security Issues + +We consider security issues related to this library critical. Please report any security-related issues by emailing the maintainers in the [MAINTAINERS](MAINTAINERS) file.
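+
+## Example
+
+A hedged sketch of driving `EncryptLayer` from a tool (the `config.EncryptConfig` construction is elided; names are illustrative):
+
+```
+encReader, encFinalizer, err := ocicrypt.EncryptLayer(encConfig, plainLayerReader, desc)
+// after consuming encReader, call encFinalizer() to obtain the annotations
+// to attach to the encrypted layer's descriptor
+```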
+
+
+## Security Issues
+
+We consider security issues related to this library critical. Please report any security related issues by emailing the maintainers in the [MAINTAINERS](MAINTAINERS) file.
+
+
+## Ocicrypt Pkcs11 Support
+
+Ocicrypt pkcs11 support is currently experimental. For more details, please refer to [this document](docs/pkcs11.md).
diff --git a/vendor/github.com/containers/ocicrypt/SECURITY.md b/vendor/github.com/containers/ocicrypt/SECURITY.md
new file mode 100644
index 0000000000..ea98cb1293
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/SECURITY.md
@@ -0,0 +1,3 @@
+## Security and Disclosure Information Policy for the OCIcrypt Library Project
+
+The OCIcrypt Library Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/main/SECURITY.md) for the Containers Projects.
diff --git a/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go
new file mode 100644
index 0000000000..b8436a8d5b
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go
@@ -0,0 +1,160 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package blockcipher
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/opencontainers/go-digest"
+)
+
+// LayerCipherType is the ciphertype as specified in the layer metadata
+type LayerCipherType string
+
+// TODO: Should be obtained from OCI spec once included
+const (
+	AES256CTR LayerCipherType = "AES_256_CTR_HMAC_SHA256"
+)
+
+// PrivateLayerBlockCipherOptions includes the information required to encrypt/decrypt
+// an image that is sensitive and should not be in plaintext
+type PrivateLayerBlockCipherOptions struct {
+	// SymmetricKey represents the symmetric key used for encryption/decryption
+	// This field should be populated by Encrypt/Decrypt calls
+	SymmetricKey []byte `json:"symkey"`
+
+	// Digest is the digest of the original data for verification.
+	// This is NOT populated by Encrypt/Decrypt calls
+	Digest digest.Digest `json:"digest"`
+
+	// CipherOptions contains the cipher metadata used for encryption/decryption
+	// This field should be populated by Encrypt/Decrypt calls
+	CipherOptions map[string][]byte `json:"cipheroptions"`
+}
+
+// PublicLayerBlockCipherOptions includes the information required to encrypt/decrypt
+// an image that is public and can be deduplicated in plaintext across multiple
+// recipients
+type PublicLayerBlockCipherOptions struct {
+	// CipherType denotes the cipher type according to the list of OCI supported
+	// cipher types.
+	CipherType LayerCipherType `json:"cipher"`
+
+	// Hmac contains the hmac string to help verify encryption
+	Hmac []byte `json:"hmac"`
+
+	// CipherOptions contains the cipher metadata used for encryption/decryption
+	// This field should be populated by Encrypt/Decrypt calls
+	CipherOptions map[string][]byte `json:"cipheroptions"`
+}
+
+// LayerBlockCipherOptions contains the public and private LayerBlockCipherOptions
+// required to encrypt/decrypt an image
+type LayerBlockCipherOptions struct {
+	Public  PublicLayerBlockCipherOptions
+	Private PrivateLayerBlockCipherOptions
+}
+
+// LayerBlockCipher returns a provider for encrypt/decrypt functionality
+// for handling the layer data for a specific algorithm
+type LayerBlockCipher interface {
+	// GenerateKey creates a symmetric key
+	GenerateKey() ([]byte, error)
+	// Encrypt takes in layer data and returns the ciphertext and relevant LayerBlockCipherOptions
+	Encrypt(layerDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, Finalizer, error)
+	// Decrypt takes in layer ciphertext data and returns the plaintext and relevant LayerBlockCipherOptions
+	Decrypt(layerDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, LayerBlockCipherOptions, error)
+}
+
+// LayerBlockCipherHandler is the handler for encrypt/decrypt for layers
+type LayerBlockCipherHandler struct {
+	cipherMap map[LayerCipherType]LayerBlockCipher
+}
+
+// Finalizer is called after data blobs are written, and returns the LayerBlockCipherOptions for the encrypted blob
+type Finalizer func() (LayerBlockCipherOptions, error)
+
+// GetOpt returns the value of the cipher option and whether the option exists
+func (lbco LayerBlockCipherOptions) GetOpt(key string) (value []byte, ok bool) {
+	if v, ok := lbco.Public.CipherOptions[key]; ok {
+		return v, ok
+	} else if v, ok := lbco.Private.CipherOptions[key]; ok {
+		return v, ok
+	}
+	return nil, false
+}
+
+func wrapFinalizerWithType(fin Finalizer, typ LayerCipherType) Finalizer {
+	return func() (LayerBlockCipherOptions, error) {
+		lbco, err := fin()
+		if err != nil {
+			return LayerBlockCipherOptions{}, err
+		}
+		lbco.Public.CipherType = typ
+		return lbco, err
+	}
+}
+
+// Encrypt is the handler for the layer encryption routine
+func (h *LayerBlockCipherHandler) Encrypt(plainDataReader io.Reader, typ LayerCipherType) (io.Reader, Finalizer, error) {
+	if c, ok := h.cipherMap[typ]; ok {
+		sk, err := c.GenerateKey()
+		if err != nil {
+			return nil, nil, err
+		}
+		opt := LayerBlockCipherOptions{
+			Private: PrivateLayerBlockCipherOptions{
+				SymmetricKey: sk,
+			},
+		}
+		encDataReader, fin, err := c.Encrypt(plainDataReader, opt)
+		if err == nil {
+			fin = wrapFinalizerWithType(fin, typ)
+		}
+		return encDataReader, fin, err
+	}
+	return nil, nil, fmt.Errorf("unsupported cipher type: %s", typ)
+}
+
+// Decrypt is the handler for the layer decryption routine
+func (h *LayerBlockCipherHandler) Decrypt(encDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, LayerBlockCipherOptions, error) {
+	typ := opt.Public.CipherType
+	if typ == "" {
+		return nil, LayerBlockCipherOptions{}, errors.New("no cipher type provided")
+	}
+	if c, ok := h.cipherMap[typ]; ok {
+		return c.Decrypt(encDataReader, opt)
+	}
+	return nil, LayerBlockCipherOptions{}, fmt.Errorf("unsupported cipher type: %s", typ)
+}
+
+// NewLayerBlockCipherHandler returns a new default handler
+func NewLayerBlockCipherHandler() (*LayerBlockCipherHandler, error) {
+	h := LayerBlockCipherHandler{
+		cipherMap: map[LayerCipherType]LayerBlockCipher{},
+	}
+
+	var err error
+	h.cipherMap[AES256CTR], err = NewAESCTRLayerBlockCipher(256)
+	if err != nil {
+		return nil, fmt.Errorf("unable to set up Cipher AES-256-CTR: %w", err)
+	}
+
+	return &h, nil
+}
diff --git a/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go
new file mode 100644
index 0000000000..7db03e2ecd
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go
@@ -0,0 +1,193 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package blockcipher
+
+import (
+	"crypto/aes"
+	"crypto/cipher"
+	"crypto/hmac"
+	"crypto/rand"
+	"crypto/sha256"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+
+	"github.com/containers/ocicrypt/utils"
+)
+
+// AESCTRLayerBlockCipher implements the AES CTR stream cipher
+type AESCTRLayerBlockCipher struct {
+	keylen         int // in bytes
+	reader         io.Reader
+	encrypt        bool
+	stream         cipher.Stream
+	err            error
+	hmac           hash.Hash
+	expHmac        []byte
+	doneEncrypting bool
+}
+
+type aesctrcryptor struct {
+	bc *AESCTRLayerBlockCipher
+}
+
+// NewAESCTRLayerBlockCipher returns a new AES CTR block cipher; currently
+// only a key length of 256 bits is supported
+func NewAESCTRLayerBlockCipher(bits int) (LayerBlockCipher, error) {
+	if bits != 256 {
+		return nil, errors.New("AES CTR bit count not supported")
+	}
+	return &AESCTRLayerBlockCipher{keylen: bits / 8}, nil
+}
+
+func (r *aesctrcryptor) Read(p []byte) (int, error) {
+	var (
+		o int
+	)
+
+	if r.bc.err != nil {
+		return 0, r.bc.err
+	}
+
+	o, err := utils.FillBuffer(r.bc.reader, p)
+	if err != nil {
+		if err == io.EOF {
+			r.bc.err = err
+		} else {
+			return 0, err
+		}
+	}
+
+	if !r.bc.encrypt {
+		if _, err := r.bc.hmac.Write(p[:o]); err != nil {
+			r.bc.err = fmt.Errorf("could not write to hmac: %w", err)
+			return 0, r.bc.err
+		}
+
+		if r.bc.err == io.EOF {
+			// Before we return EOF we let the HMAC comparison
+			// provide a verdict
+			if !hmac.Equal(r.bc.hmac.Sum(nil), r.bc.expHmac) {
+				r.bc.err = fmt.Errorf("could not properly decrypt byte stream; exp hmac: '%x', actual hmac: '%x'", r.bc.expHmac, r.bc.hmac.Sum(nil))
+				return 0, r.bc.err
+			}
+		}
+	}
+
+	r.bc.stream.XORKeyStream(p[:o], p[:o])
+
+	if r.bc.encrypt {
+		if _, err := r.bc.hmac.Write(p[:o]); err != nil {
+			r.bc.err = fmt.Errorf("could not write to hmac: %w", err)
+			return 0, r.bc.err
+		}
+
+		if r.bc.err == io.EOF {
+			// Final data encrypted; do the 'then-MAC' part
+			r.bc.doneEncrypting = true
+		}
+	}
+
+	return o, r.bc.err
+}
+
+// init initializes an instance
+func (bc *AESCTRLayerBlockCipher) init(encrypt bool, reader io.Reader, opts LayerBlockCipherOptions) (LayerBlockCipherOptions, error) {
+	var (
+		err error
+	)
+
+	key := opts.Private.SymmetricKey
+	if len(key) != bc.keylen {
+		return LayerBlockCipherOptions{}, fmt.Errorf("invalid key length of %d bytes; need %d bytes", len(key), bc.keylen)
+	}
+
+	nonce, ok := opts.GetOpt("nonce")
+	if !ok {
+		nonce = make([]byte, aes.BlockSize)
+		if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
+			return LayerBlockCipherOptions{}, fmt.Errorf("unable to generate random nonce: %w", err)
+		}
+	}
+
+	block, err := aes.NewCipher(key)
+	if err != nil {
+		return LayerBlockCipherOptions{}, fmt.Errorf("aes.NewCipher failed: %w", err)
+	}
+
+	bc.reader = reader
+	bc.encrypt = encrypt
+	bc.stream = cipher.NewCTR(block, nonce)
+	bc.err = nil
+	bc.hmac = hmac.New(sha256.New, key)
+	bc.expHmac = opts.Public.Hmac
+	bc.doneEncrypting = false
+
+	if !encrypt && len(bc.expHmac) == 0 {
+		return LayerBlockCipherOptions{}, errors.New("HMAC is not provided for decryption process")
+	}
+
+	lbco := LayerBlockCipherOptions{
+		Private: PrivateLayerBlockCipherOptions{
+			SymmetricKey: key,
+			CipherOptions: map[string][]byte{
+				"nonce": nonce,
+			},
+		},
+	}
+
+	return lbco, nil
+}
+
+// GenerateKey creates a symmetric key
+func (bc *AESCTRLayerBlockCipher) GenerateKey() ([]byte, error) {
+	key := make([]byte, bc.keylen)
+	if _, err := io.ReadFull(rand.Reader, key); err != nil {
+		return nil, err
+	}
+	return key, nil
+}
+
+// Encrypt takes in layer data and returns the ciphertext and relevant LayerBlockCipherOptions
+func (bc *AESCTRLayerBlockCipher) Encrypt(plainDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, Finalizer, error) {
+	lbco, err := bc.init(true, plainDataReader, opt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	finalizer := func() (LayerBlockCipherOptions, error) {
+		if !bc.doneEncrypting {
+			return LayerBlockCipherOptions{}, errors.New("Read()ing not complete, unable to finalize")
+		}
+		if lbco.Public.CipherOptions == nil {
+			lbco.Public.CipherOptions = map[string][]byte{}
+		}
+		lbco.Public.Hmac = bc.hmac.Sum(nil)
+		return lbco, nil
+	}
+	return &aesctrcryptor{bc}, finalizer, nil
+}
+
+// Decrypt takes in layer ciphertext data and returns the plaintext and relevant LayerBlockCipherOptions
+func (bc *AESCTRLayerBlockCipher) Decrypt(encDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, LayerBlockCipherOptions, error) {
+	lbco, err := bc.init(false, encDataReader, opt)
+	if err != nil {
+		return nil, LayerBlockCipherOptions{}, err
+	}
+
+	return utils.NewDelayedReader(&aesctrcryptor{bc}, 1024*10), lbco, nil
+}
diff --git a/vendor/github.com/containers/ocicrypt/config/config.go b/vendor/github.com/containers/ocicrypt/config/config.go
new file mode 100644
index 0000000000..d960766ebe
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/config/config.go
@@ -0,0 +1,114 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package config
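The parameter maps in this package are keyed by scheme-specific strings. As a rough sketch of how per-scheme configs are assembled and merged, using the constructors from constructors.go further down in this diff (the byte slices are placeholders, not valid key material):

```go
package main

import (
	"fmt"

	"github.com/containers/ocicrypt/config"
)

func main() {
	// Placeholder key material; real callers would pass PEM-encoded keys/certs.
	jweCfg, _ := config.EncryptWithJwe([][]byte{[]byte("jwe public key PEM")})
	pkcs7Cfg, _ := config.EncryptWithPkcs7([][]byte{[]byte("x509 certificate PEM")})

	// Merge the per-scheme configs into a single CryptoConfig.
	cc := config.CombineCryptoConfigs([]config.CryptoConfig{jweCfg, pkcs7Cfg})
	fmt.Println(len(cc.EncryptConfig.Parameters), "encryption parameter sets") // "pubkeys" and "x509s"
}
```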
+// EncryptConfig is the container image PGP encryption configuration holding
+// the identifiers of those that will be able to decrypt the container and
+// the PGP public keyring file data that contains their public keys.
+type EncryptConfig struct {
+	// map holding 'gpg-recipients', 'gpg-pubkeyringfile', 'pubkeys', 'x509s'
+	Parameters map[string][][]byte
+
+	DecryptConfig DecryptConfig
+}
+
+// DecryptConfig wraps the Parameters map that holds the decryption key
+type DecryptConfig struct {
+	// map holding 'privkeys', 'x509s', 'gpg-privatekeys'
+	Parameters map[string][][]byte
+}
+
+// CryptoConfig is a common wrapper for EncryptConfig and DecryptConfig that can
+// be passed through functions that share much code for encryption and decryption
+type CryptoConfig struct {
+	EncryptConfig *EncryptConfig
+	DecryptConfig *DecryptConfig
+}
+
+// InitDecryption initializes a CryptoConfig object with parameters used for decryption
+func InitDecryption(dcparameters map[string][][]byte) CryptoConfig {
+	return CryptoConfig{
+		DecryptConfig: &DecryptConfig{
+			Parameters: dcparameters,
+		},
+	}
+}
+
+// InitEncryption initializes a CryptoConfig object with parameters used for encryption.
+// It also takes dcparameters that may be needed for decryption when adding a recipient
+// to an already encrypted image.
+func InitEncryption(parameters, dcparameters map[string][][]byte) CryptoConfig {
+	return CryptoConfig{
+		EncryptConfig: &EncryptConfig{
+			Parameters: parameters,
+			DecryptConfig: DecryptConfig{
+				Parameters: dcparameters,
+			},
+		},
+	}
+}
+
+// CombineCryptoConfigs takes a CryptoConfig list and creates a single CryptoConfig
+// containing the crypto configuration of all the key bundles
+func CombineCryptoConfigs(ccs []CryptoConfig) CryptoConfig {
+	ecparam := map[string][][]byte{}
+	ecdcparam := map[string][][]byte{}
+	dcparam := map[string][][]byte{}
+
+	for _, cc := range ccs {
+		if ec := cc.EncryptConfig; ec != nil {
+			addToMap(ecparam, ec.Parameters)
+			addToMap(ecdcparam, ec.DecryptConfig.Parameters)
+		}
+
+		if dc := cc.DecryptConfig; dc != nil {
+			addToMap(dcparam, dc.Parameters)
+		}
+	}
+
+	return CryptoConfig{
+		EncryptConfig: &EncryptConfig{
+			Parameters: ecparam,
+			DecryptConfig: DecryptConfig{
+				Parameters: ecdcparam,
+			},
+		},
+		DecryptConfig: &DecryptConfig{
+			Parameters: dcparam,
+		},
+	}
+}
+
+// AttachDecryptConfig adds a DecryptConfig to the DecryptConfig field of EncryptConfig so that
+// the decryption parameters can be used to add recipients to an existing image
+// if the user is able to decrypt it.
+func (ec *EncryptConfig) AttachDecryptConfig(dc *DecryptConfig) {
+	if dc != nil {
+		addToMap(ec.DecryptConfig.Parameters, dc.Parameters)
+	}
+}
+
+func addToMap(orig map[string][][]byte, add map[string][][]byte) {
+	for k, v := range add {
+		if ov, ok := orig[k]; ok {
+			orig[k] = append(ov, v...)
+		} else {
+			orig[k] = v
+		}
+	}
+}
diff --git a/vendor/github.com/containers/ocicrypt/config/constructors.go b/vendor/github.com/containers/ocicrypt/config/constructors.go
new file mode 100644
index 0000000000..f7f29cd8d9
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/config/constructors.go
@@ -0,0 +1,246 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package config
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/containers/ocicrypt/crypto/pkcs11"
+	"gopkg.in/yaml.v3"
+)
+
+// EncryptWithJwe returns a CryptoConfig to encrypt with jwe public keys
+func EncryptWithJwe(pubKeys [][]byte) (CryptoConfig, error) {
+	dc := DecryptConfig{}
+	ep := map[string][][]byte{
+		"pubkeys": pubKeys,
+	}
+
+	return CryptoConfig{
+		EncryptConfig: &EncryptConfig{
+			Parameters:    ep,
+			DecryptConfig: dc,
+		},
+		DecryptConfig: &dc,
+	}, nil
+}
+
+// EncryptWithPkcs7 returns a CryptoConfig to encrypt with pkcs7 x509 certs
+func EncryptWithPkcs7(x509s [][]byte) (CryptoConfig, error) {
+	dc := DecryptConfig{}
+
+	ep := map[string][][]byte{
+		"x509s": x509s,
+	}
+
+	return CryptoConfig{
+		EncryptConfig: &EncryptConfig{
+			Parameters:    ep,
+			DecryptConfig: dc,
+		},
+		DecryptConfig: &dc,
+	}, nil
+}
+
+// EncryptWithGpg returns a CryptoConfig to encrypt with configured gpg parameters
+func EncryptWithGpg(gpgRecipients [][]byte, gpgPubRingFile []byte) (CryptoConfig, error) {
+	dc := DecryptConfig{}
+	ep := map[string][][]byte{
+		"gpg-recipients":     gpgRecipients,
+		"gpg-pubkeyringfile": {gpgPubRingFile},
+	}
+
+	return CryptoConfig{
+		EncryptConfig: &EncryptConfig{
+			Parameters:    ep,
+			DecryptConfig: dc,
+		},
+		DecryptConfig: &dc,
+	}, nil
+}
+
+// EncryptWithPkcs11 returns a CryptoConfig to encrypt with configured pkcs11 parameters
+func EncryptWithPkcs11(pkcs11Config *pkcs11.Pkcs11Config, pkcs11Pubkeys, pkcs11Yamls [][]byte) (CryptoConfig, error) {
+	dc := DecryptConfig{}
+	ep := map[string][][]byte{}
+
+	if len(pkcs11Yamls) > 0 {
+		if pkcs11Config == nil {
+			return CryptoConfig{}, errors.New("pkcs11Config must not be nil")
+		}
+		p11confYaml, err := yaml.Marshal(pkcs11Config)
+		if err != nil {
+			return CryptoConfig{}, fmt.Errorf("Could not marshal Pkcs11Config to Yaml: %w", err)
+		}
+
+		dc = DecryptConfig{
+			Parameters: map[string][][]byte{
+				"pkcs11-config": {p11confYaml},
+			},
+		}
+		ep["pkcs11-yamls"] = pkcs11Yamls
+	}
+	if len(pkcs11Pubkeys) > 0 {
+		ep["pkcs11-pubkeys"] = pkcs11Pubkeys
+	}
+
+	return CryptoConfig{
+		EncryptConfig: &EncryptConfig{
+			Parameters:    ep,
+			DecryptConfig: dc,
+		},
+		DecryptConfig: &dc,
+	}, nil
+}
+
+// EncryptWithKeyProvider returns a CryptoConfig to encrypt with configured keyprovider parameters
+func EncryptWithKeyProvider(keyProviders [][]byte) (CryptoConfig, error) {
+	dc := DecryptConfig{}
+	ep := make(map[string][][]byte)
+	for _, keyProvider := range keyProviders {
+		keyProvidersStr := string(keyProvider)
+		idx := strings.Index(keyProvidersStr, ":")
+		if idx > 0 {
+			ep[keyProvidersStr[:idx]] = append(ep[keyProvidersStr[:idx]], []byte(keyProvidersStr[idx+1:]))
+		} else {
+			ep[keyProvidersStr] = append(ep[keyProvidersStr], []byte("Enabled"))
+		}
+	}
+
+	return CryptoConfig{
+		EncryptConfig: &EncryptConfig{
+			Parameters:    ep,
+			DecryptConfig: dc,
+		},
+		DecryptConfig: &dc,
+	}, nil
+}
+
+// DecryptWithKeyProvider returns a CryptoConfig to decrypt with configured keyprovider parameters
+func DecryptWithKeyProvider(keyProviders [][]byte) (CryptoConfig, error) {
+	dp := make(map[string][][]byte)
+	ep := map[string][][]byte{}
+	for _, keyProvider := range keyProviders {
+		keyProvidersStr := string(keyProvider)
+		idx := strings.Index(keyProvidersStr, ":")
+		if idx > 0 {
+			dp[keyProvidersStr[:idx]] = append(dp[keyProvidersStr[:idx]], []byte(keyProvidersStr[idx+1:]))
+		} else {
+			dp[keyProvidersStr] = append(dp[keyProvidersStr], []byte("Enabled"))
+		}
+	}
+	dc := DecryptConfig{
+		Parameters: dp,
+	}
+	return CryptoConfig{
+		EncryptConfig: &EncryptConfig{
+			Parameters:    ep,
+			DecryptConfig: dc,
+		},
+		DecryptConfig: &dc,
+	}, nil
+}
+
+// DecryptWithPrivKeys returns a CryptoConfig to decrypt with configured private keys
+func DecryptWithPrivKeys(privKeys [][]byte, privKeysPasswords [][]byte) (CryptoConfig, error) {
+	if len(privKeys) != len(privKeysPasswords) {
+		return CryptoConfig{}, errors.New("Length of privKeys should match length of privKeysPasswords")
+	}
+
+	dc := DecryptConfig{
+		Parameters: map[string][][]byte{
+			"privkeys":           privKeys,
+			"privkeys-passwords": privKeysPasswords,
+		},
+	}
+
+	ep := map[string][][]byte{}
+
+	return CryptoConfig{
+		EncryptConfig: &EncryptConfig{
+			Parameters:    ep,
+			DecryptConfig: dc,
+		},
+		DecryptConfig: &dc,
+	}, nil
+}
+
+// DecryptWithX509s returns a CryptoConfig to decrypt with configured x509 certs
+func DecryptWithX509s(x509s [][]byte) (CryptoConfig, error) {
+	dc := DecryptConfig{
+		Parameters: map[string][][]byte{
+			"x509s": x509s,
+		},
+	}
+
+	ep := map[string][][]byte{}
+
+	return CryptoConfig{
+		EncryptConfig: &EncryptConfig{
+			Parameters:    ep,
+			DecryptConfig: dc,
+		},
+		DecryptConfig: &dc,
+	}, nil
+}
+
+// DecryptWithGpgPrivKeys returns a CryptoConfig to decrypt with configured gpg private keys
+func DecryptWithGpgPrivKeys(gpgPrivKeys, gpgPrivKeysPwds [][]byte) (CryptoConfig, error) {
+	dc := DecryptConfig{
+		Parameters: map[string][][]byte{
+			"gpg-privatekeys":           gpgPrivKeys,
+			"gpg-privatekeys-passwords": gpgPrivKeysPwds,
+		},
+	}
+
+	ep := map[string][][]byte{}
+
+	return CryptoConfig{
+		EncryptConfig: &EncryptConfig{
+			Parameters:    ep,
+			DecryptConfig: dc,
+		},
+		DecryptConfig: &dc,
+	}, nil
+}
+
+// DecryptWithPkcs11Yaml returns a CryptoConfig to decrypt with pkcs11 YAML formatted key files
+func DecryptWithPkcs11Yaml(pkcs11Config *pkcs11.Pkcs11Config, pkcs11Yamls [][]byte) (CryptoConfig, error) {
+	p11confYaml, err := yaml.Marshal(pkcs11Config)
+	if err != nil {
+		return CryptoConfig{}, fmt.Errorf("Could not marshal Pkcs11Config to Yaml: %w", err)
+	}
+
+	dc := DecryptConfig{
+		Parameters: map[string][][]byte{
+			"pkcs11-yamls":  pkcs11Yamls,
+			"pkcs11-config": {p11confYaml},
+		},
+	}
+
+	ep := map[string][][]byte{}
+
+	return CryptoConfig{
+		EncryptConfig: &EncryptConfig{
+			Parameters:    ep,
+			DecryptConfig: dc,
+		},
+		DecryptConfig: &dc,
+	}, nil
+}
diff --git a/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go b/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go
new file mode 100644
index 0000000000..4785a831b5
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go
@@ -0,0 +1,80 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package config
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+)
+
+// Command describes the structure of a command; it consists of path and args,
+// where path defines the location of the binary executable and args are the
+// arguments passed on to it
+type Command struct {
+	Path string   `json:"path,omitempty"`
+	Args []string `json:"args,omitempty"`
+}
+
+// KeyProviderAttrs describes the structure of a key provider entry; it defines
+// the way the key provider is invoked
+type KeyProviderAttrs struct {
+	Command *Command `json:"cmd,omitempty"`
+	Grpc    string   `json:"grpc,omitempty"`
+}
+
+// OcicryptConfig represents the format of an ocicrypt_provider.conf config file
+type OcicryptConfig struct {
+	KeyProviderConfig map[string]KeyProviderAttrs `json:"key-providers"`
+}
+
+const ENVVARNAME = "OCICRYPT_KEYPROVIDER_CONFIG"
+
+// parseConfigFile parses a configuration file; it is not an error if the
+// configuration file does not exist, so no error is returned in that case
+func parseConfigFile(filename string) (*OcicryptConfig, error) {
+	// a non-existent config file is not an error
+	_, err := os.Stat(filename)
+	if os.IsNotExist(err) {
+		return nil, nil
+	}
+
+	data, err := os.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+
+	ic := &OcicryptConfig{}
+	err = json.Unmarshal(data, ic)
+	return ic, err
+}
+
+// GetConfiguration tries to read the configuration file pointed to by the
+// OCICRYPT_KEYPROVIDER_CONFIG environment variable (for example
+// /etc/ocicrypt_keyprovider.yaml). If no configuration file could be found
+// or read, a nil pointer is returned.
+func GetConfiguration() (*OcicryptConfig, error) {
+	var ic *OcicryptConfig
+	var err error
+	filename := os.Getenv(ENVVARNAME)
+	if len(filename) > 0 {
+		ic, err = parseConfigFile(filename)
+		if err != nil {
+			return nil, fmt.Errorf("Error while parsing keyprovider config file: %w", err)
+		}
+	} else {
+		return nil, nil
+	}
+	return ic, nil
+}
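For orientation, a hypothetical keyprovider configuration of the shape GetConfiguration parses might look like the following. The provider names, binary path, and gRPC address are illustrative only, not prescribed by the library; the snippet mirrors the JSON tags of OcicryptConfig above and simply checks that the document parses:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical contents of the file pointed to by OCICRYPT_KEYPROVIDER_CONFIG.
const exampleConf = `{
  "key-providers": {
    "example-cmd-provider": {
      "cmd": { "path": "/usr/local/bin/example-keyprovider", "args": ["--socket", "/run/keys.sock"] }
    },
    "example-grpc-provider": { "grpc": "localhost:50051" }
  }
}`

func main() {
	var conf struct {
		KeyProviderConfig map[string]struct {
			Command *struct {
				Path string   `json:"path,omitempty"`
				Args []string `json:"args,omitempty"`
			} `json:"cmd,omitempty"`
			Grpc string `json:"grpc,omitempty"`
		} `json:"key-providers"`
	}
	if err := json.Unmarshal([]byte(exampleConf), &conf); err != nil {
		panic(err)
	}
	fmt.Println(len(conf.KeyProviderConfig), "key providers configured")
}
```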
diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go
new file mode 100644
index 0000000000..072d7fe184
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go
@@ -0,0 +1,134 @@
+/*
+   Copyright The ocicrypt Authors.
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+   http://www.apache.org/licenses/LICENSE-2.0
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package pkcs11
+
+import (
+	"fmt"
+
+	pkcs11uri "github.com/stefanberger/go-pkcs11uri"
+	"gopkg.in/yaml.v3"
+)
+
+// Pkcs11KeyFile describes the format of the pkcs11 (private) key file.
+// It also carries pkcs11 module related environment variables that are transferred to the
+// Pkcs11URI object and activated when the pkcs11 module is used.
+type Pkcs11KeyFile struct {
+	Pkcs11 struct {
+		Uri string `yaml:"uri"`
+	} `yaml:"pkcs11"`
+	Module struct {
+		Env map[string]string `yaml:"env,omitempty"`
+	} `yaml:"module"`
+}
+
+// Pkcs11KeyFileObject is a representation of the Pkcs11KeyFile with the pkcs11 URI as an object
+type Pkcs11KeyFileObject struct {
+	Uri *pkcs11uri.Pkcs11URI
+}
+
+// ParsePkcs11Uri parses a pkcs11 URI
+func ParsePkcs11Uri(uri string) (*pkcs11uri.Pkcs11URI, error) {
+	p11uri := pkcs11uri.New()
+	err := p11uri.Parse(uri)
+	if err != nil {
+		return nil, fmt.Errorf("Could not parse Pkcs11URI from file: %w", err)
+	}
+	return p11uri, err
+}
+
+// ParsePkcs11KeyFile parses a pkcs11 key file holding a pkcs11 URI describing a private key.
+// The file has the following yaml format:
+//
+//	pkcs11:
+//	    uri: <pkcs11 URI>
+//
+// An error is returned if the pkcs11 URI is malformed.
+func ParsePkcs11KeyFile(yamlstr []byte) (*Pkcs11KeyFileObject, error) {
+	p11keyfile := Pkcs11KeyFile{}
+
+	err := yaml.Unmarshal(yamlstr, &p11keyfile)
+	if err != nil {
+		return nil, fmt.Errorf("Could not unmarshal pkcs11 keyfile: %w", err)
+	}
+
+	p11uri, err := ParsePkcs11Uri(p11keyfile.Pkcs11.Uri)
+	if err != nil {
+		return nil, err
+	}
+	p11uri.SetEnvMap(p11keyfile.Module.Env)
+
+	return &Pkcs11KeyFileObject{Uri: p11uri}, err
+}
+
+// IsPkcs11PrivateKey checks whether the given YAML represents a Pkcs11 private key
+func IsPkcs11PrivateKey(yamlstr []byte) bool {
+	_, err := ParsePkcs11KeyFile(yamlstr)
+	return err == nil
+}
+
+// IsPkcs11PublicKey checks whether the given YAML represents a Pkcs11 public key
+func IsPkcs11PublicKey(yamlstr []byte) bool {
+	_, err := ParsePkcs11KeyFile(yamlstr)
+	return err == nil
+}
+
+// Pkcs11Config describes the layout of a pkcs11 config file.
+// The file has the following yaml format:
+//
+//	module-directories:
+//	    - /usr/lib64/pkcs11/
+//	allowed-module-paths:
+//	    - /usr/lib64/pkcs11/libsofthsm2.so
+type Pkcs11Config struct {
+	ModuleDirectories  []string `yaml:"module-directories"`
+	AllowedModulePaths []string `yaml:"allowed-module-paths"`
+}
+
+// GetDefaultModuleDirectories returns module directories covering
+// a variety of Linux distros
+func GetDefaultModuleDirectories() []string {
+	dirs := []string{
+		"/usr/lib64/pkcs11/", // Fedora,RHEL,openSUSE
+		"/usr/lib/pkcs11/",   // Fedora,ArchLinux
+		"/usr/local/lib/pkcs11/",
+		"/usr/lib/softhsm/", // Debian,Ubuntu
+	}
+
+	// Debian directory: /usr/lib/(x86_64|aarch64|arm|powerpc64le|riscv64|s390x)-linux-gnu/
+	hosttype, ostype, q := getHostAndOsType()
+	if len(hosttype) > 0 {
+		dir := fmt.Sprintf("/usr/lib/%s-%s-%s/", hosttype, ostype, q)
+		dirs = append(dirs, dir)
+	}
+	return dirs
+}
+
+// GetDefaultModuleDirectoriesYaml returns the default module directories formatted as YAML
+func GetDefaultModuleDirectoriesYaml(indent string) string {
+	res := ""
+
+	for _, dir := range GetDefaultModuleDirectories() {
+		res += indent + "- " + dir + "\n"
+	}
+	return res
+}
+
+// ParsePkcs11ConfigFile parses a pkcs11 config file that influences the module search behavior
+// as well as the set of modules that users are allowed to use
+func ParsePkcs11ConfigFile(yamlstr []byte) (*Pkcs11Config, error) {
+	p11conf := Pkcs11Config{}
+
+	err := yaml.Unmarshal(yamlstr, &p11conf)
+	if err != nil {
+		return &p11conf, fmt.Errorf("Could not parse Pkcs11Config: %w", err)
+	}
+	return &p11conf, nil
+}
diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go
new file mode 100644
index 0000000000..fe047a1e62
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go
@@ -0,0 +1,485 @@
+//go:build cgo
+// +build cgo
+
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package pkcs11
+
+import (
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha1"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"hash"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/miekg/pkcs11"
+	pkcs11uri "github.com/stefanberger/go-pkcs11uri"
+)
+
+var (
+	// OAEPLabel defines the label we use for OAEP encryption; this cannot be changed
+	OAEPLabel = []byte("")
+
+	// OAEPSha1Params describes the OAEP parameters with sha1 hash algorithm; needed by SoftHSM
+	OAEPSha1Params = &pkcs11.OAEPParams{
+		HashAlg:    pkcs11.CKM_SHA_1,
+		MGF:        pkcs11.CKG_MGF1_SHA1,
+		SourceType: pkcs11.CKZ_DATA_SPECIFIED,
+		SourceData: OAEPLabel,
+	}
+	// OAEPSha256Params describes the OAEP parameters with sha256 hash algorithm
+	OAEPSha256Params = &pkcs11.OAEPParams{
+		HashAlg:    pkcs11.CKM_SHA256,
+		MGF:        pkcs11.CKG_MGF1_SHA256,
+		SourceType: pkcs11.CKZ_DATA_SPECIFIED,
+		SourceData: OAEPLabel,
+	}
+)
+
+// rsaPublicEncryptOAEP encrypts the given plaintext with the given *rsa.PublicKey; the
+// environment variable OCICRYPT_OAEP_HASHALG can be set to 'sha1' to force usage of sha1 for OAEP (SoftHSM).
+// This function is needed by clients who are using a public key file for pkcs11 encryption
+func rsaPublicEncryptOAEP(pubKey *rsa.PublicKey, plaintext []byte) ([]byte, string, error) {
+	var (
+		hashfunc hash.Hash
+		hashalg  string
+	)
+
+	oaephash := os.Getenv("OCICRYPT_OAEP_HASHALG")
+	// The default is sha256 (previously it was sha1)
+	switch strings.ToLower(oaephash) {
+	case "sha1":
+		hashfunc = sha1.New()
+		hashalg = "sha1"
+	case "sha256", "":
+		hashfunc = sha256.New()
+		hashalg = "sha256"
+	default:
+		return nil, "", fmt.Errorf("Unsupported OAEP hash '%s'", oaephash)
+	}
+	ciphertext, err := rsa.EncryptOAEP(hashfunc, rand.Reader, pubKey, plaintext, OAEPLabel)
+	if err != nil {
+		return nil, "", fmt.Errorf("rsa.EncryptOAEP failed: %w", err)
+	}
+
+	return ciphertext, hashalg, nil
+}
+
+// pkcs11UriGetLoginParameters gets the parameters necessary for login from the Pkcs11URI.
+// PIN and module are mandatory; slot-id is optional and if not found -1 will be returned.
+// For a privateKeyOperation a PIN is required and if none is given, this function will return an error.
+func pkcs11UriGetLoginParameters(p11uri *pkcs11uri.Pkcs11URI, privateKeyOperation bool) (string, string, int64, error) {
+	var (
+		pin string
+		err error
+	)
+	if privateKeyOperation {
+		if !p11uri.HasPIN() {
+			return "", "", 0, errors.New("Missing PIN for private key operation")
+		}
+	}
+	// some devices require a PIN to find a *public* key object, others don't
+	pin, _ = p11uri.GetPIN()
+
+	module, err := p11uri.GetModule()
+	if err != nil {
+		return "", "", 0, fmt.Errorf("No module available in pkcs11 URI: %w", err)
+	}
+
+	slotid := int64(-1)
+
+	slot, ok := p11uri.GetPathAttribute("slot-id", false)
+	if ok {
+		slotid, err = strconv.ParseInt(slot, 10, 64)
+		if err != nil {
+			return "", "", 0, fmt.Errorf("slot-id is not a valid number: %w", err)
+		}
+		if slotid < 0 {
+			return "", "", 0, fmt.Errorf("slot-id is a negative number")
+		}
+		if uint64(slotid) > 0xffffffff {
+			return "", "", 0, fmt.Errorf("slot-id is larger than 32 bit")
+		}
+	}
+
+	return pin, module, slotid, nil
+}
+
+// pkcs11UriGetKeyIdAndLabel gets the key label by retrieving the value of the 'object' attribute
+func pkcs11UriGetKeyIdAndLabel(p11uri *pkcs11uri.Pkcs11URI) (string, string, error) {
+	keyid, ok2 := p11uri.GetPathAttribute("id", false)
+	label, ok1 := p11uri.GetPathAttribute("object", false)
+	if !ok1 && !ok2 {
+		return "", "", errors.New("Neither 'id' nor 'object' attributes were found in pkcs11 URI")
+	}
+	return keyid, label, nil
+}
+
+// pkcs11OpenSession opens a session with a pkcs11 device at the given slot and logs in with the given PIN
+func pkcs11OpenSession(p11ctx *pkcs11.Ctx, slotid uint, pin string) (session pkcs11.SessionHandle, err error) {
+	session, err = p11ctx.OpenSession(slotid, pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION)
+	if err != nil {
+		return 0, fmt.Errorf("OpenSession to slot %d failed: %w", slotid, err)
+	}
+	if len(pin) > 0 {
+		err = p11ctx.Login(session, pkcs11.CKU_USER, pin)
+		if err != nil {
+			_ = p11ctx.CloseSession(session)
+			return 0, fmt.Errorf("Could not login to device: %w", err)
+		}
+	}
+	return session, nil
+}
+
+// pkcs11UriLogin uses the given pkcs11 URI to select the pkcs11 module (shared library) and to get
+// the PIN to use for login; if the URI contains a slot-id, the given slot-id will be used, otherwise
+// one slot after the other will be attempted and the first one where login succeeds will be used
+func pkcs11UriLogin(p11uri *pkcs11uri.Pkcs11URI, privateKeyOperation bool) (ctx *pkcs11.Ctx, session pkcs11.SessionHandle, err error) {
+	pin, module, slotid, err := pkcs11UriGetLoginParameters(p11uri, privateKeyOperation)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	p11ctx := pkcs11.New(module)
+	if p11ctx == nil {
+		return nil, 0, errors.New("Please check module path, input is: " + module)
+	}
+
+	err = p11ctx.Initialize()
+	if err != nil {
+		p11Err := err.(pkcs11.Error)
+		if p11Err != pkcs11.CKR_CRYPTOKI_ALREADY_INITIALIZED {
+			return nil, 0, fmt.Errorf("Initialize failed: %w", err)
+		}
+	}
+
+	if slotid >= 0 {
+		session, err := pkcs11OpenSession(p11ctx, uint(slotid), pin)
+		return p11ctx, session, err
+	}
+
+	slots, err := p11ctx.GetSlotList(true)
+	if err != nil {
+		return nil, 0, fmt.Errorf("GetSlotList failed: %w", err)
+	}
+
+	tokenlabel, ok := p11uri.GetPathAttribute("token", false)
+	if !ok {
+		return nil, 0, errors.New("Missing 'token' attribute since 'slot-id' was not given")
+	}
+
+	for _, slot := range slots {
+		ti, err := p11ctx.GetTokenInfo(slot)
+		if err != nil || ti.Label != tokenlabel {
+			continue
+		}
+
+		session, err = pkcs11OpenSession(p11ctx, slot, pin)
+		if err == nil {
+			return p11ctx, session, err
+		}
+	}
+	if len(pin) > 0 {
+		return nil, 0, errors.New("Could not create session to any slot and/or log in")
+	}
+	return nil, 0, errors.New("Could not create session to any slot")
+}
+
+func pkcs11Logout(ctx *pkcs11.Ctx, session pkcs11.SessionHandle) {
+	_ = ctx.Logout(session)
+	_ = ctx.CloseSession(session)
+	_ = ctx.Finalize()
+	ctx.Destroy()
+}
+
+// findObject finds an object of the given class with the given keyid and/or label
+func findObject(p11ctx *pkcs11.Ctx, session pkcs11.SessionHandle, class uint, keyid, label string) (pkcs11.ObjectHandle, error) {
+	msg := ""
+
+	template := []*pkcs11.Attribute{
+		pkcs11.NewAttribute(pkcs11.CKA_CLASS, class),
+	}
+	if len(label) > 0 {
+		template = append(template, pkcs11.NewAttribute(pkcs11.CKA_LABEL, label))
+		msg = fmt.Sprintf("label '%s'", label)
+	}
+	if len(keyid) > 0 {
+		template = append(template, pkcs11.NewAttribute(pkcs11.CKA_ID, keyid))
+		if len(msg) > 0 {
+			msg += " and "
+		}
+		msg += url.PathEscape(keyid)
+	}
+
+	if err := p11ctx.FindObjectsInit(session, template); err != nil {
+		return 0, fmt.Errorf("FindObjectsInit failed: %w", err)
+	}
+
+	obj, _, err := p11ctx.FindObjects(session, 100)
+	if err != nil {
+		return 0, fmt.Errorf("FindObjects failed: %w", err)
+	}
+
+	if err := p11ctx.FindObjectsFinal(session); err != nil {
+		return 0, fmt.Errorf("FindObjectsFinal failed: %w", err)
+	}
+	if len(obj) > 1 {
+		return 0, fmt.Errorf("There are too many (=%d) keys with %s", len(obj), msg)
+	} else if len(obj) == 1 {
+		return obj[0], nil
+	}
+
+	return 0, fmt.Errorf("Could not find any object with %s", msg)
+}
+
+// publicEncryptOAEP uses a public key described by a pkcs11 URI to OAEP encrypt the given plaintext
+func publicEncryptOAEP(pubKey *Pkcs11KeyFileObject, plaintext []byte) ([]byte, string, error) {
+	oldenv, err := setEnvVars(pubKey.Uri.GetEnvMap())
+	if err != nil {
+		return nil, "", err
+	}
+	defer restoreEnv(oldenv)
+
+	p11ctx, session, err := pkcs11UriLogin(pubKey.Uri, false)
+	if err != nil {
+		return nil, "", err
+	}
+	defer pkcs11Logout(p11ctx, session)
+
+	keyid, label, err := pkcs11UriGetKeyIdAndLabel(pubKey.Uri)
+	if err != nil {
+		return nil, "", err
+	}
+
+	p11PubKey, err := findObject(p11ctx, session, pkcs11.CKO_PUBLIC_KEY, keyid, label)
+	if err != nil {
+		return nil, "", err
+	}
+
+	var hashalg string
+
+	var oaep *pkcs11.OAEPParams
+	oaephash := os.Getenv("OCICRYPT_OAEP_HASHALG")
+	// The default is sha256 (previously it was sha1)
+	switch strings.ToLower(oaephash) {
+	case "sha1":
+		oaep = OAEPSha1Params
+		hashalg = "sha1"
+	case "sha256", "":
+		oaep = OAEPSha256Params
+		hashalg = "sha256"
+	default:
+		return nil, "", fmt.Errorf("Unsupported OAEP hash '%s'", oaephash)
+	}
+
+	err = p11ctx.EncryptInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_OAEP, oaep)}, p11PubKey)
+	if err != nil {
+		return nil, "", fmt.Errorf("EncryptInit error: %w", err)
+	}
+
+	ciphertext, err := p11ctx.Encrypt(session, plaintext)
+	if err != nil {
+		return nil, "", fmt.Errorf("Encrypt failed: %w", err)
+	}
+	return ciphertext, hashalg, nil
+}
+
+// privateDecryptOAEP uses a pkcs11 URI describing a private key to OAEP decrypt a ciphertext
+func privateDecryptOAEP(privKeyObj *Pkcs11KeyFileObject, ciphertext []byte, hashalg string) ([]byte, error) {
+	oldenv, err := setEnvVars(privKeyObj.Uri.GetEnvMap())
+	if err != nil {
+		return nil, err
+	}
+	defer restoreEnv(oldenv)
+
+	p11ctx, session, err := pkcs11UriLogin(privKeyObj.Uri, true)
+	if err != nil {
+		return nil, err
+	}
+	defer pkcs11Logout(p11ctx, session)
+
+	keyid, label, err := pkcs11UriGetKeyIdAndLabel(privKeyObj.Uri)
+	if err != nil {
+		return nil, err
+	}
+
+	p11PrivKey, err := findObject(p11ctx, session, pkcs11.CKO_PRIVATE_KEY, keyid, label)
+	if err != nil {
+		return nil, err
+	}
+
+	var oaep *pkcs11.OAEPParams
+
+	// An empty string from the Hash in the JSON historically defaults to sha1.
+	switch hashalg {
+	case "sha1", "":
+		oaep = OAEPSha1Params
+	case "sha256":
+		oaep = OAEPSha256Params
+	default:
+		return nil, fmt.Errorf("Unsupported hash algorithm '%s' for decryption", hashalg)
+	}
+
+	err = p11ctx.DecryptInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_OAEP, oaep)}, p11PrivKey)
+	if err != nil {
+		return nil, fmt.Errorf("DecryptInit failed: %w", err)
+	}
+	plaintext, err := p11ctx.Decrypt(session, ciphertext)
+	if err != nil {
+		return nil, fmt.Errorf("Decrypt failed: %w", err)
+	}
+	return plaintext, err
+}
+
+//
+// The following part deals with the JSON formatted message for multiple pkcs11 recipients
+//
+
+// Pkcs11Blob holds the encrypted blobs for all recipients; this is what we will put into the image's annotations
+type Pkcs11Blob struct {
+	Version    uint              `json:"version"`
+	Recipients []Pkcs11Recipient `json:"recipients"`
+}
+
+// Pkcs11Recipient holds the b64-encoded and encrypted blob for a particular recipient
+type Pkcs11Recipient struct {
+	Version uint   `json:"version"`
+	Blob    string `json:"blob"`
+	Hash    string `json:"hash,omitempty"`
+}
+
+// EncryptMultiple encrypts for one or multiple pkcs11 devices; the public keys passed to this function
+// may either be *rsa.PublicKey or *pkcs11uri.Pkcs11URI; the returned byte array is a JSON string of the
+// following format:
+//
+//	{
+//	  "recipients": [ // recipient list
+//	    {
+//	      "version": 0,
+//	      "blob": <base64 encoded OAEP-encrypted blob>,
+//	      "hash": <hash algorithm used for OAEP>
+//	    },
+//	    {
+//	      "version": 0,
+//	      "blob": <base64 encoded OAEP-encrypted blob>,
+//	      "hash": <hash algorithm used for OAEP>
+//	    },
+//	    [...]
+//	  ]
+//	}
+func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) {
+	var (
+		ciphertext []byte
+		err        error
+		pkcs11blob Pkcs11Blob = Pkcs11Blob{Version: 0}
+		hashalg    string
+	)
+
+	for _, pubKey := range pubKeys {
+		switch pkey := pubKey.(type) {
+		case *rsa.PublicKey:
+			ciphertext, hashalg, err = rsaPublicEncryptOAEP(pkey, data)
+		case *Pkcs11KeyFileObject:
+			ciphertext, hashalg, err = publicEncryptOAEP(pkey, data)
+		default:
+			err = fmt.Errorf("Unsupported key object type for pkcs11 public key")
+		}
+		if err != nil {
+			return nil, err
+		}
+
+		recipient := Pkcs11Recipient{
+			Version: 0,
+			Blob:    base64.StdEncoding.EncodeToString(ciphertext),
+			Hash:    hashalg,
+		}
+
+		pkcs11blob.Recipients = append(pkcs11blob.Recipients, recipient)
+	}
+	return json.Marshal(&pkcs11blob)
+}
+
+// Decrypt tries to decrypt one of the recipients' blobs using a pkcs11 private key.
+// The input pkcs11blobstr is a string with the following format:
+//
+//	{
+//	  "recipients": [ // recipient list
+//	    {
+//	      "version": 0,
+//	      "blob": <base64 encoded OAEP-encrypted blob>,
+//	      "hash": <hash algorithm used for OAEP>
+//	    },
+//	    {
+//	      "version": 0,
+//	      "blob": <base64 encoded OAEP-encrypted blob>,
+//	      "hash": <hash algorithm used for OAEP>
+//	    },
+//	    [...]
+//	  ]
+//	}
+//
+// Note: More recent versions of this code explicitly write 'sha1'
+// while older versions left it empty in case of 'sha1'.
+func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte, error) {
+	pkcs11blob := Pkcs11Blob{}
+	err := json.Unmarshal(pkcs11blobstr, &pkcs11blob)
+	if err != nil {
+		return nil, fmt.Errorf("Could not parse Pkcs11Blob: %w", err)
+	}
+	switch pkcs11blob.Version {
+	case 0:
+		// latest supported version
+	default:
+		return nil, fmt.Errorf("found Pkcs11Blob with version %d but maximum supported version is 0", pkcs11blob.Version)
+	}
+	// since we do trial and error, collect all encountered errors
+	errs := ""
+
+	for _, recipient := range pkcs11blob.Recipients {
+		switch recipient.Version {
+		case 0:
+			// last supported version
+		default:
+			return nil, fmt.Errorf("found Pkcs11Recipient with version %d but maximum supported version is 0", recipient.Version)
+		}
+
+		ciphertext, err := base64.StdEncoding.DecodeString(recipient.Blob)
+		if err != nil || len(ciphertext) == 0 {
+			// This should never happen... we skip over decoding issues
+			errs += fmt.Sprintf("Base64 decoding failed: %s\n", err)
+			continue
+		}
+		// try all keys until one works
+		for _, privKeyObj := range privKeyObjs {
+			plaintext, err := privateDecryptOAEP(privKeyObj, ciphertext, recipient.Hash)
+			if err == nil {
+				return plaintext, nil
+			}
+			if uri, err2 := privKeyObj.Uri.Format(); err2 == nil {
+				errs += fmt.Sprintf("%s : %s\n", uri, err)
+			} else {
+				errs += fmt.Sprintf("%s\n", err)
+			}
+		}
+	}
+
+	return nil, fmt.Errorf("Could not find a pkcs11 key for decryption:\n%s", errs)
+}
diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go
new file mode 100644
index 0000000000..6cf0aa2a9f
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go
@@ -0,0 +1,30 @@
+//go:build !cgo
+// +build !cgo
+
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package pkcs11
+
+import "fmt"
+
+func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) {
+	return nil, fmt.Errorf("ocicrypt pkcs11 not supported on this build")
+}
+
+func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte, error) {
+	return nil, fmt.Errorf("ocicrypt pkcs11 not supported on this build")
+}
diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go
new file mode 100644
index 0000000000..231da23174
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go
@@ -0,0 +1,115 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package pkcs11
+
+import (
+	"fmt"
+	"os"
+	"runtime"
+	"strings"
+	"sync"
+)
+
+var (
+	envLock sync.Mutex
+)
+
+// setEnvVars sets the environment variables given in the map and locks the
+// environment against modification through this same function; if successful,
+// you *must* call restoreEnv with the return value of this function
+func setEnvVars(env map[string]string) ([]string, error) {
+	envLock.Lock()
+
+	if len(env) == 0 {
+		return nil, nil
+	}
+
+	oldenv := os.Environ()
+
+	for k, v := range env {
+		err := os.Setenv(k, v)
+		if err != nil {
+			restoreEnv(oldenv)
+			return nil, fmt.Errorf("Could not set environment variable '%s' to '%s': %w", k, v, err)
+		}
+	}
+
+	return oldenv, nil
+}
+
+func arrayToMap(elements []string) map[string]string {
+	o := make(map[string]string)
+
+	for _, element := range elements {
+		p := strings.SplitN(element, "=", 2)
+		if len(p) == 2 {
+			o[p[0]] = p[1]
+		}
+	}
+
+	return o
+}
+
+// restoreEnv restores the environment to be exactly as given in the array of strings
+// and unlocks the lock
+func restoreEnv(envs []string) {
+	if envs != nil {
+		target := arrayToMap(envs)
+		curr := arrayToMap(os.Environ())
+
+		for nc, vc := range curr {
+			vt, ok := target[nc]
+			if !ok {
+				os.Unsetenv(nc)
+			} else if vc == vt {
+				delete(target, nc)
+			}
+		}
+
+		for nt, vt := range target {
+			os.Setenv(nt, vt)
+		}
+	}
+
+	envLock.Unlock()
+}
+
+func getHostAndOsType() (string, string, string) {
+	ht := ""
+	ot := ""
+	st := ""
+	switch runtime.GOOS {
+	case "linux":
+		ot = "linux"
+		st = "gnu"
+		switch runtime.GOARCH {
+		case "arm":
+			ht = "arm"
+		case "arm64":
+			ht = "aarch64"
+		case "amd64":
+			ht = "x86_64"
+		case "ppc64le":
+			ht = "powerpc64le"
+		case "riscv64":
+			ht = "riscv64"
+		case "s390x":
+			ht = "s390x"
+		}
+	}
+	return ht, ot, st
+}
diff --git a/vendor/github.com/containers/ocicrypt/encryption.go b/vendor/github.com/containers/ocicrypt/encryption.go
new file mode 100644
index 0000000000..b6fa9db40e
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/encryption.go
@@ -0,0 +1,356 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package ocicrypt
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/containers/ocicrypt/blockcipher"
+	"github.com/containers/ocicrypt/config"
+	keyproviderconfig "github.com/containers/ocicrypt/config/keyprovider-config"
+	"github.com/containers/ocicrypt/keywrap"
+	"github.com/containers/ocicrypt/keywrap/jwe"
+	"github.com/containers/ocicrypt/keywrap/keyprovider"
+	"github.com/containers/ocicrypt/keywrap/pgp"
+	"github.com/containers/ocicrypt/keywrap/pkcs11"
+	"github.com/containers/ocicrypt/keywrap/pkcs7"
+	"github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	log "github.com/sirupsen/logrus"
+)
+
+// EncryptLayerFinalizer is a finalizer run to return the annotations to set for
+// the encrypted layer
+type EncryptLayerFinalizer func() (map[string]string, error)
+
+func init() {
+	keyWrappers = make(map[string]keywrap.KeyWrapper)
+	keyWrapperAnnotations = make(map[string]string)
+	RegisterKeyWrapper("pgp", pgp.NewKeyWrapper())
+	RegisterKeyWrapper("jwe", jwe.NewKeyWrapper())
+	RegisterKeyWrapper("pkcs7", pkcs7.NewKeyWrapper())
+	RegisterKeyWrapper("pkcs11", pkcs11.NewKeyWrapper())
+	ic, err := keyproviderconfig.GetConfiguration()
+	if err != nil {
+		log.Error(err)
+	} else if ic != nil {
+		for provider, attrs := range ic.KeyProviderConfig {
+			RegisterKeyWrapper("provider."+provider, keyprovider.NewKeyWrapper(provider, attrs))
+		}
+	}
+}
+
+var keyWrappers map[string]keywrap.KeyWrapper
+var keyWrapperAnnotations map[string]string
+
+// RegisterKeyWrapper registers a key wrapper under its encryption scheme
+func RegisterKeyWrapper(scheme string, iface keywrap.KeyWrapper) {
+	keyWrappers[scheme] = iface
+	keyWrapperAnnotations[iface.GetAnnotationID()] = scheme
+}
+
+// GetKeyWrapper looks up the encryptor interface given an encryption scheme (gpg, jwe)
+func GetKeyWrapper(scheme string) keywrap.KeyWrapper {
+	return keyWrappers[scheme]
+}
+
+// GetWrappedKeysMap returns a map of wrappedKeys as values in a
+// map with the encryption scheme(s) as the key(s)
+func GetWrappedKeysMap(desc ocispec.Descriptor) map[string]string {
+	wrappedKeysMap := make(map[string]string)
+
+	for annotationsID, scheme := range keyWrapperAnnotations {
+		if annotation, ok := desc.Annotations[annotationsID]; ok {
+			wrappedKeysMap[scheme] = annotation
+		}
+	}
+	return wrappedKeysMap
+}
+
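+// EncryptLayer below handles two cases. If the descriptor already carries
+// encryption annotations, the existing wrapped key options are unwrapped
+// (using ec.DecryptConfig) and re-wrapped, which is how a recipient is added
+// to an already encrypted layer. Otherwise a fresh symmetric key is generated
+// via commonEncryptLayer. In both cases the returned finalizer yields the
+// annotations (wrapped keys and public cipher options) to set on the layer
+// descriptor once the encrypted stream has been fully consumed.
+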
+ + if ec == nil { + return nil, nil, errors.New("EncryptConfig must not be nil") + } + + for annotationsID := range keyWrapperAnnotations { + annotation := desc.Annotations[annotationsID] + if annotation != "" { + privOptsData, err = decryptLayerKeyOptsData(&ec.DecryptConfig, desc) + if err != nil { + return nil, nil, err + } + pubOptsData, err = getLayerPubOpts(desc) + if err != nil { + return nil, nil, err + } + // already encrypted! + encrypted = true + } + } + + if !encrypted { + encLayerReader, bcFin, err = commonEncryptLayer(encOrPlainLayerReader, desc.Digest, blockcipher.AES256CTR) + if err != nil { + return nil, nil, err + } + } + + encLayerFinalizer := func() (map[string]string, error) { + // If layer was already encrypted, bcFin should be nil, use existing optsData + if bcFin != nil { + opts, err := bcFin() + if err != nil { + return nil, err + } + privOptsData, err = json.Marshal(opts.Private) + if err != nil { + return nil, fmt.Errorf("could not JSON marshal opts: %w", err) + } + pubOptsData, err = json.Marshal(opts.Public) + if err != nil { + return nil, fmt.Errorf("could not JSON marshal opts: %w", err) + } + } + + newAnnotations := make(map[string]string) + keysWrapped := false + if len(keyWrapperAnnotations) == 0 { + return nil, errors.New("missing Annotations needed for decryption") + } + for annotationsID, scheme := range keyWrapperAnnotations { + b64Annotations := desc.Annotations[annotationsID] + keywrapper := GetKeyWrapper(scheme) + b64Annotations, err = preWrapKeys(keywrapper, ec, b64Annotations, privOptsData) + if err != nil { + return nil, err + } + if b64Annotations != "" { + keysWrapped = true + newAnnotations[annotationsID] = b64Annotations + } + } + + if !keysWrapped { + return nil, errors.New("no wrapped keys produced by encryption") + } + newAnnotations["org.opencontainers.image.enc.pubopts"] = base64.StdEncoding.EncodeToString(pubOptsData) + + if len(newAnnotations) == 0 { + return nil, errors.New("no encryptor found to handle encryption") + } + + return newAnnotations, err + } + + // if nothing was encrypted, we just return encLayer = nil + return encLayerReader, encLayerFinalizer, err + +} + +// preWrapKeys calls WrapKeys and handles the base64 encoding and concatenation of the +// annotation data +func preWrapKeys(keywrapper keywrap.KeyWrapper, ec *config.EncryptConfig, b64Annotations string, optsData []byte) (string, error) { + newAnnotation, err := keywrapper.WrapKeys(ec, optsData) + if err != nil || len(newAnnotation) == 0 { + return b64Annotations, err + } + b64newAnnotation := base64.StdEncoding.EncodeToString(newAnnotation) + if b64Annotations == "" { + return b64newAnnotation, nil + } + return b64Annotations + "," + b64newAnnotation, nil +} + +// DecryptLayer decrypts a layer trying one keywrap.KeyWrapper after the other to see whether it +// can apply the provided private key +// If unwrapOnly is set we will only try to decrypt the layer encryption key and return +func DecryptLayer(dc *config.DecryptConfig, encLayerReader io.Reader, desc ocispec.Descriptor, unwrapOnly bool) (io.Reader, digest.Digest, error) { + if dc == nil { + return nil, "", errors.New("DecryptConfig must not be nil") + } + privOptsData, err := decryptLayerKeyOptsData(dc, desc) + if err != nil || unwrapOnly { + return nil, "", err + } + + var pubOptsData []byte + pubOptsData, err = getLayerPubOpts(desc) + if err != nil { + return nil, "", err + } + + return commonDecryptLayer(encLayerReader, privOptsData, pubOptsData) +} + +func decryptLayerKeyOptsData(dc *config.DecryptConfig, 
desc ocispec.Descriptor) ([]byte, error) { + privKeyGiven := false + errs := "" + if len(keyWrapperAnnotations) == 0 { + return nil, errors.New("missing Annotations needed for decryption") + } + for annotationsID, scheme := range keyWrapperAnnotations { + b64Annotation := desc.Annotations[annotationsID] + if b64Annotation != "" { + keywrapper := GetKeyWrapper(scheme) + + if keywrapper.NoPossibleKeys(dc.Parameters) { + continue + } + + if len(keywrapper.GetPrivateKeys(dc.Parameters)) > 0 { + privKeyGiven = true + } + optsData, err := preUnwrapKey(keywrapper, dc, b64Annotation) + if err != nil { + // try next keywrap.KeyWrapper + errs += fmt.Sprintf("%s\n", err) + continue + } + if optsData == nil { + // try next keywrap.KeyWrapper + continue + } + return optsData, nil + } + } + if !privKeyGiven { + return nil, fmt.Errorf("missing private key needed for decryption:\n%s", errs) + } + return nil, fmt.Errorf("no suitable key unwrapper found or none of the private keys could be used for decryption:\n%s", errs) +} + +func getLayerPubOpts(desc ocispec.Descriptor) ([]byte, error) { + pubOptsString := desc.Annotations["org.opencontainers.image.enc.pubopts"] + if pubOptsString == "" { + return json.Marshal(blockcipher.PublicLayerBlockCipherOptions{}) + } + return base64.StdEncoding.DecodeString(pubOptsString) +} + +// preUnwrapKey decodes the comma separated base64 strings and calls the Unwrap function +// of the given keywrapper with it and returns the result in case the Unwrap functions +// does not return an error. If all attempts fail, an error is returned. +func preUnwrapKey(keywrapper keywrap.KeyWrapper, dc *config.DecryptConfig, b64Annotations string) ([]byte, error) { + if b64Annotations == "" { + return nil, nil + } + errs := "" + for _, b64Annotation := range strings.Split(b64Annotations, ",") { + annotation, err := base64.StdEncoding.DecodeString(b64Annotation) + if err != nil { + return nil, errors.New("could not base64 decode the annotation") + } + optsData, err := keywrapper.UnwrapKey(dc, annotation) + if err != nil { + errs += fmt.Sprintf("- %s\n", err) + continue + } + return optsData, nil + } + return nil, fmt.Errorf("no suitable key found for decrypting layer key:\n%s", errs) +} + +// commonEncryptLayer is a function to encrypt the plain layer using a new random +// symmetric key and return the LayerBlockCipherHandler's JSON in string form for +// later use during decryption +func commonEncryptLayer(plainLayerReader io.Reader, d digest.Digest, typ blockcipher.LayerCipherType) (io.Reader, blockcipher.Finalizer, error) { + lbch, err := blockcipher.NewLayerBlockCipherHandler() + if err != nil { + return nil, nil, err + } + + encLayerReader, bcFin, err := lbch.Encrypt(plainLayerReader, typ) + if err != nil { + return nil, nil, err + } + + newBcFin := func() (blockcipher.LayerBlockCipherOptions, error) { + lbco, err := bcFin() + if err != nil { + return blockcipher.LayerBlockCipherOptions{}, err + } + lbco.Private.Digest = d + return lbco, nil + } + + return encLayerReader, newBcFin, err +} + +// commonDecryptLayer decrypts an encrypted layer previously encrypted with commonEncryptLayer +// by passing along the optsData +func commonDecryptLayer(encLayerReader io.Reader, privOptsData []byte, pubOptsData []byte) (io.Reader, digest.Digest, error) { + privOpts := blockcipher.PrivateLayerBlockCipherOptions{} + err := json.Unmarshal(privOptsData, &privOpts) + if err != nil { + return nil, "", fmt.Errorf("could not JSON unmarshal privOptsData: %w", err) + } + + lbch, err := 
blockcipher.NewLayerBlockCipherHandler() + if err != nil { + return nil, "", err + } + + pubOpts := blockcipher.PublicLayerBlockCipherOptions{} + if len(pubOptsData) > 0 { + err := json.Unmarshal(pubOptsData, &pubOpts) + if err != nil { + return nil, "", fmt.Errorf("could not JSON unmarshal pubOptsData: %w", err) + } + } + + opts := blockcipher.LayerBlockCipherOptions{ + Private: privOpts, + Public: pubOpts, + } + + plainLayerReader, opts, err := lbch.Decrypt(encLayerReader, opts) + if err != nil { + return nil, "", err + } + + return plainLayerReader, opts.Private.Digest, nil +} + +// FilterOutAnnotations filters out the annotations belonging to the image encryption 'namespace' +// and returns a map with those taken out +func FilterOutAnnotations(annotations map[string]string) map[string]string { + a := make(map[string]string) + if len(annotations) > 0 { + for k, v := range annotations { + if strings.HasPrefix(k, "org.opencontainers.image.enc.") { + continue + } + a[k] = v + } + } + return a +} diff --git a/vendor/github.com/containers/ocicrypt/gpg.go b/vendor/github.com/containers/ocicrypt/gpg.go new file mode 100644 index 0000000000..3bba4669bd --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/gpg.go @@ -0,0 +1,431 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ocicrypt + +import ( + "errors" + "fmt" + "io" + "os" + "os/exec" + "regexp" + "strconv" + "strings" + "sync" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/term" +) + +// GPGVersion enum representing the GPG client version to use. +type GPGVersion int + +const ( + // GPGv2 signifies gpgv2+ + GPGv2 GPGVersion = iota + // GPGv1 signifies gpgv1+ + GPGv1 + // GPGVersionUndetermined signifies gpg client version undetermined + GPGVersionUndetermined +) + +// GPGClient defines an interface for wrapping the gpg command line tools +type GPGClient interface { + // ReadGPGPubRingFile gets the byte sequence of the gpg public keyring + ReadGPGPubRingFile() ([]byte, error) + // GetGPGPrivateKey gets the private key bytes of a keyid given a passphrase + GetGPGPrivateKey(keyid uint64, passphrase string) ([]byte, error) + // GetSecretKeyDetails gets the details of a secret key + GetSecretKeyDetails(keyid uint64) ([]byte, bool, error) + // GetKeyDetails gets the details of a public key + GetKeyDetails(keyid uint64) ([]byte, bool, error) + // ResolveRecipients resolves PGP key ids to user names + ResolveRecipients([]string) []string +} + +// gpgClient contains generic gpg client information +type gpgClient struct { + gpgHomeDir string +} + +// gpgv2Client is a gpg2 client +type gpgv2Client struct { + gpgClient +} + +// gpgv1Client is a gpg client +type gpgv1Client struct { + gpgClient +} + +// GuessGPGVersion guesses the version of gpg. Defaults to gpg2 if exists, if +// not defaults to regular gpg. 
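As a usage note for the GPG plumbing vendored in this file: the client construction and keyring export compose as in the following hedged sketch. The helper name, the "v2" version string, and the homedir path are illustrative, not part of this change.

```go
package sketch

import (
	"fmt"

	"github.com/containers/ocicrypt"
)

// exportPubRing is a hypothetical helper: it builds a GPG client and exports
// the public keyring bytes that the pgp key wrapper later consumes via the
// "gpg-pubkeyringfile" encryption parameter (see keywrapper_gpg.go below).
func exportPubRing() ([]byte, error) {
	// "v2" forces gpgv2; an unrecognized version string makes NewGPGClient
	// fall back to probing the installed gpg binaries.
	client, err := ocicrypt.NewGPGClient("v2", "/home/user/.gnupg")
	if err != nil {
		return nil, fmt.Errorf("creating gpg client: %w", err)
	}
	return client.ReadGPGPubRingFile()
}
```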
+func GuessGPGVersion() GPGVersion { + if err := exec.Command("gpg2", "--version").Run(); err == nil { + return GPGv2 + } else if err := exec.Command("gpg", "--version").Run(); err == nil { + return GPGv1 + } + return GPGVersionUndetermined +} + +// NewGPGClient creates a new GPGClient object representing the given version +// and using the given home directory +func NewGPGClient(gpgVersion, gpgHomeDir string) (GPGClient, error) { + v := new(GPGVersion) + switch gpgVersion { + case "v1": + *v = GPGv1 + case "v2": + *v = GPGv2 + default: + v = nil + } + return newGPGClient(v, gpgHomeDir) +} + +func newGPGClient(version *GPGVersion, homedir string) (GPGClient, error) { + var gpgVersion GPGVersion + if version != nil { + gpgVersion = *version + } else { + gpgVersion = GuessGPGVersion() + } + + switch gpgVersion { + case GPGv1: + return &gpgv1Client{ + gpgClient: gpgClient{gpgHomeDir: homedir}, + }, nil + case GPGv2: + return &gpgv2Client{ + gpgClient: gpgClient{gpgHomeDir: homedir}, + }, nil + case GPGVersionUndetermined: + return nil, fmt.Errorf("unable to determine GPG version") + default: + return nil, fmt.Errorf("unhandled case: NewGPGClient") + } +} + +// GetGPGPrivateKey gets the bytes of a specified keyid, supplying a passphrase +func (gc *gpgv2Client) GetGPGPrivateKey(keyid uint64, passphrase string) ([]byte, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = append(args, []string{"--homedir", gc.gpgHomeDir}...) + } + + rfile, wfile, err := os.Pipe() + if err != nil { + return nil, fmt.Errorf("could not create pipe: %w", err) + } + defer func() { + rfile.Close() + wfile.Close() + }() + // fill pipe in background + go func(passphrase string) { + _, _ = wfile.Write([]byte(passphrase)) + wfile.Close() + }(passphrase) + + args = append(args, []string{"--pinentry-mode", "loopback", "--batch", "--passphrase-fd", fmt.Sprintf("%d", 3), "--export-secret-key", fmt.Sprintf("0x%x", keyid)}...) + + cmd := exec.Command("gpg2", args...) + cmd.ExtraFiles = []*os.File{rfile} + + return runGPGGetOutput(cmd) +} + +// ReadGPGPubRingFile reads the GPG public key ring file +func (gc *gpgv2Client) ReadGPGPubRingFile() ([]byte, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = append(args, []string{"--homedir", gc.gpgHomeDir}...) + } + args = append(args, []string{"--batch", "--export"}...) + + cmd := exec.Command("gpg2", args...) + + return runGPGGetOutput(cmd) +} + +func (gc *gpgv2Client) getKeyDetails(option string, keyid uint64) ([]byte, bool, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = []string{"--homedir", gc.gpgHomeDir} + } + args = append(args, option, fmt.Sprintf("0x%x", keyid)) + + cmd := exec.Command("gpg2", args...) + + keydata, err := runGPGGetOutput(cmd) + return keydata, err == nil, err +} + +// GetSecretKeyDetails retrieves the secret key details of key with keyid. +// returns a byte array of the details and a bool if the key exists +func (gc *gpgv2Client) GetSecretKeyDetails(keyid uint64) ([]byte, bool, error) { + return gc.getKeyDetails("-K", keyid) +} + +// GetKeyDetails retrieves the public key details of key with keyid. 
+// returns a byte array of the details and a bool if the key exists +func (gc *gpgv2Client) GetKeyDetails(keyid uint64) ([]byte, bool, error) { + return gc.getKeyDetails("-k", keyid) +} + +// ResolveRecipients converts PGP keyids to email addresses, if possible +func (gc *gpgv2Client) ResolveRecipients(recipients []string) []string { + return resolveRecipients(gc, recipients) +} + +// GetGPGPrivateKey gets the bytes of a specified keyid, supplying a passphrase +func (gc *gpgv1Client) GetGPGPrivateKey(keyid uint64, _ string) ([]byte, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = append(args, []string{"--homedir", gc.gpgHomeDir}...) + } + args = append(args, []string{"--batch", "--export-secret-key", fmt.Sprintf("0x%x", keyid)}...) + + cmd := exec.Command("gpg", args...) + + return runGPGGetOutput(cmd) +} + +// ReadGPGPubRingFile reads the GPG public key ring file +func (gc *gpgv1Client) ReadGPGPubRingFile() ([]byte, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = append(args, []string{"--homedir", gc.gpgHomeDir}...) + } + args = append(args, []string{"--batch", "--export"}...) + + cmd := exec.Command("gpg", args...) + + return runGPGGetOutput(cmd) +} + +func (gc *gpgv1Client) getKeyDetails(option string, keyid uint64) ([]byte, bool, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = []string{"--homedir", gc.gpgHomeDir} + } + args = append(args, option, fmt.Sprintf("0x%x", keyid)) + + cmd := exec.Command("gpg", args...) + + keydata, err := runGPGGetOutput(cmd) + + return keydata, err == nil, err +} + +// GetSecretKeyDetails retrieves the secret key details of key with keyid. +// returns a byte array of the details and a bool if the key exists +func (gc *gpgv1Client) GetSecretKeyDetails(keyid uint64) ([]byte, bool, error) { + return gc.getKeyDetails("-K", keyid) +} + +// GetKeyDetails retrieves the public key details of key with keyid. 
+// returns a byte array of the details and a bool if the key exists +func (gc *gpgv1Client) GetKeyDetails(keyid uint64) ([]byte, bool, error) { + return gc.getKeyDetails("-k", keyid) +} + +// ResolveRecipients converts PGP keyids to email addresses, if possible +func (gc *gpgv1Client) ResolveRecipients(recipients []string) []string { + return resolveRecipients(gc, recipients) +} + +// runGPGGetOutput runs the GPG commandline and returns stdout as byte array +// and any stderr in the error +func runGPGGetOutput(cmd *exec.Cmd) ([]byte, error) { + stdout, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + stderr, err := cmd.StderrPipe() + if err != nil { + return nil, err + } + if err := cmd.Start(); err != nil { + return nil, err + } + + stdoutstr, err2 := io.ReadAll(stdout) + stderrstr, _ := io.ReadAll(stderr) + + if err := cmd.Wait(); err != nil { + return nil, fmt.Errorf("error from %s: %s", cmd.Path, string(stderrstr)) + } + + return stdoutstr, err2 +} + +// resolveRecipients walks the list of recipients and attempts to convert +// all keyIds to email addresses; if something goes wrong during the +// conversion of a recipient, the original string is returned for that +// recpient +func resolveRecipients(gc GPGClient, recipients []string) []string { + var result []string + + for _, recipient := range recipients { + keyID, err := strconv.ParseUint(recipient, 0, 64) + if err != nil { + result = append(result, recipient) + } else { + details, found, _ := gc.GetKeyDetails(keyID) + if !found { + result = append(result, recipient) + } else { + email := extractEmailFromDetails(details) + if email == "" { + result = append(result, recipient) + } else { + result = append(result, email) + } + } + } + } + return result +} + +var ( + onceRegexp sync.Once + emailPattern *regexp.Regexp +) + +func extractEmailFromDetails(details []byte) string { + onceRegexp.Do(func() { + emailPattern = regexp.MustCompile(`uid\s+\[.*\]\s.*\s<(?P.+)>`) + }) + loc := emailPattern.FindSubmatchIndex(details) + if len(loc) == 0 { + return "" + } + return string(emailPattern.Expand(nil, []byte("$email"), details, loc)) +} + +// uint64ToStringArray converts an array of uint64's to an array of strings +// by applying a format string to each uint64 +func uint64ToStringArray(format string, in []uint64) []string { + var ret []string + + for _, v := range in { + ret = append(ret, fmt.Sprintf(format, v)) + } + return ret +} + +// GPGGetPrivateKey walks the list of layerInfos and tries to decrypt the +// wrapped symmetric keys. For this it determines whether a private key is +// in the GPGVault or on this system and prompts for the passwords for those +// that are available. If we do not find a private key on the system for +// getting to the symmetric key of a layer then an error is generated. 
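A hedged caller-side sketch of the function implemented below: passing a nil GPGVault restricts the key search to the local gpg installation, and mustFindKey=true turns a missing key into a hard error. The helper name collectLayerKeys is invented for illustration.

```go
package sketch

import (
	"github.com/containers/ocicrypt"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// collectLayerKeys walks encrypted layer descriptors and gathers the private
// keys (and matching passphrases) needed to unwrap them, prompting on the
// terminal for passphrases where required.
func collectLayerKeys(descs []ocispec.Descriptor) ([][]byte, [][]byte, error) {
	// Empty version string: probe the installed gpg; empty homedir: use the default.
	client, err := ocicrypt.NewGPGClient("", "")
	if err != nil {
		return nil, nil, err
	}
	// nil GPGVault: consult only the local gpg installation.
	// mustFindKey=true: a missing key is an error instead of a skip.
	return ocicrypt.GPGGetPrivateKey(descs, client, nil, true)
}
```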
+func GPGGetPrivateKey(descs []ocispec.Descriptor, gpgClient GPGClient, gpgVault GPGVault, mustFindKey bool) (gpgPrivKeys [][]byte, gpgPrivKeysPwds [][]byte, err error) { + // PrivateKeyData describes a private key + type PrivateKeyData struct { + KeyData []byte + KeyDataPassword []byte + } + var pkd PrivateKeyData + keyIDPasswordMap := make(map[uint64]PrivateKeyData) + + for _, desc := range descs { + for scheme, b64pgpPackets := range GetWrappedKeysMap(desc) { + if scheme != "pgp" { + continue + } + keywrapper := GetKeyWrapper(scheme) + if keywrapper == nil { + return nil, nil, fmt.Errorf("could not get KeyWrapper for %s", scheme) + } + keyIds, err := keywrapper.GetKeyIdsFromPacket(b64pgpPackets) + if err != nil { + return nil, nil, err + } + + found := false + for _, keyid := range keyIds { + // do we have this key? -- first check the vault + if gpgVault != nil { + _, keydata := gpgVault.GetGPGPrivateKey(keyid) + if len(keydata) > 0 { + pkd = PrivateKeyData{ + KeyData: keydata, + KeyDataPassword: nil, // password not supported in this case + } + keyIDPasswordMap[keyid] = pkd + found = true + break + } + } else if gpgClient != nil { + // check the local system's gpg installation + keyinfo, haveKey, _ := gpgClient.GetSecretKeyDetails(keyid) + // this may fail if the key is not here; we ignore the error + if !haveKey { + // key not on this system + continue + } + + _, found = keyIDPasswordMap[keyid] + if !found { + fmt.Printf("Passphrase required for Key id 0x%x: \n%v", keyid, string(keyinfo)) + fmt.Printf("Enter passphrase for key with Id 0x%x: ", keyid) + + password, err := term.ReadPassword(int(os.Stdin.Fd())) + fmt.Printf("\n") + if err != nil { + return nil, nil, err + } + keydata, err := gpgClient.GetGPGPrivateKey(keyid, string(password)) + if err != nil { + return nil, nil, err + } + pkd = PrivateKeyData{ + KeyData: keydata, + KeyDataPassword: password, + } + keyIDPasswordMap[keyid] = pkd + found = true + } + break + } else { + return nil, nil, errors.New("no GPGVault or GPGClient passed") + } + } + if !found && len(b64pgpPackets) > 0 && mustFindKey { + ids := uint64ToStringArray("0x%x", keyIds) + + return nil, nil, fmt.Errorf("missing key for decryption of layer %x of %s. Need one of the following keys: %s", desc.Digest, desc.Platform, strings.Join(ids, ", ")) + } + } + } + + for _, pkd := range keyIDPasswordMap { + gpgPrivKeys = append(gpgPrivKeys, pkd.KeyData) + gpgPrivKeysPwds = append(gpgPrivKeysPwds, pkd.KeyDataPassword) + } + + return gpgPrivKeys, gpgPrivKeysPwds, nil +} diff --git a/vendor/github.com/containers/ocicrypt/gpgvault.go b/vendor/github.com/containers/ocicrypt/gpgvault.go new file mode 100644 index 0000000000..f1bd0d989a --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/gpgvault.go @@ -0,0 +1,100 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package ocicrypt + +import ( + "bytes" + "fmt" + "os" + + "golang.org/x/crypto/openpgp" + "golang.org/x/crypto/openpgp/packet" +) + +// GPGVault defines an interface for wrapping multiple secret key rings +type GPGVault interface { + // AddSecretKeyRingData adds a secret keyring via its raw byte array + AddSecretKeyRingData(gpgSecretKeyRingData []byte) error + // AddSecretKeyRingDataArray adds secret keyring via its raw byte arrays + AddSecretKeyRingDataArray(gpgSecretKeyRingDataArray [][]byte) error + // AddSecretKeyRingFiles adds secret keyrings given their filenames + AddSecretKeyRingFiles(filenames []string) error + // GetGPGPrivateKey gets the private key bytes of a keyid given a passphrase + GetGPGPrivateKey(keyid uint64) ([]openpgp.Key, []byte) +} + +// gpgVault wraps an array of gpgSecretKeyRing +type gpgVault struct { + entityLists []openpgp.EntityList + keyDataList [][]byte // the raw data original passed in +} + +// NewGPGVault creates an empty GPGVault +func NewGPGVault() GPGVault { + return &gpgVault{} +} + +// AddSecretKeyRingData adds a secret keyring's to the gpgVault; the raw byte +// array read from the file must be passed and will be parsed by this function +func (g *gpgVault) AddSecretKeyRingData(gpgSecretKeyRingData []byte) error { + // read the private keys + r := bytes.NewReader(gpgSecretKeyRingData) + entityList, err := openpgp.ReadKeyRing(r) + if err != nil { + return fmt.Errorf("could not read keyring: %w", err) + } + g.entityLists = append(g.entityLists, entityList) + g.keyDataList = append(g.keyDataList, gpgSecretKeyRingData) + return nil +} + +// AddSecretKeyRingDataArray adds secret keyrings to the gpgVault; the raw byte +// arrays read from files must be passed +func (g *gpgVault) AddSecretKeyRingDataArray(gpgSecretKeyRingDataArray [][]byte) error { + for _, gpgSecretKeyRingData := range gpgSecretKeyRingDataArray { + if err := g.AddSecretKeyRingData(gpgSecretKeyRingData); err != nil { + return err + } + } + return nil +} + +// AddSecretKeyRingFiles adds the secret key rings given their filenames +func (g *gpgVault) AddSecretKeyRingFiles(filenames []string) error { + for _, filename := range filenames { + gpgSecretKeyRingData, err := os.ReadFile(filename) + if err != nil { + return err + } + err = g.AddSecretKeyRingData(gpgSecretKeyRingData) + if err != nil { + return err + } + } + return nil +} + +// GetGPGPrivateKey gets the bytes of a specified keyid, supplying a passphrase +func (g *gpgVault) GetGPGPrivateKey(keyid uint64) ([]openpgp.Key, []byte) { + for i, el := range g.entityLists { + decKeys := el.KeysByIdUsage(keyid, packet.KeyFlagEncryptCommunications) + if len(decKeys) > 0 { + return decKeys, g.keyDataList[i] + } + } + return nil, nil +} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go b/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go new file mode 100644 index 0000000000..c1bdd6fbeb --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go @@ -0,0 +1,156 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package jwe + +import ( + "crypto/ecdsa" + "errors" + "fmt" + + "github.com/containers/ocicrypt/config" + "github.com/containers/ocicrypt/keywrap" + "github.com/containers/ocicrypt/utils" + "github.com/go-jose/go-jose/v4" +) + +type jweKeyWrapper struct { +} + +func (kw *jweKeyWrapper) GetAnnotationID() string { + return "org.opencontainers.image.enc.keys.jwe" +} + +// NewKeyWrapper returns a new key wrapping interface using jwe +func NewKeyWrapper() keywrap.KeyWrapper { + return &jweKeyWrapper{} +} + +// WrapKeys wraps the session key for recpients and encrypts the optsData, which +// describe the symmetric key used for encrypting the layer +func (kw *jweKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) { + var joseRecipients []jose.Recipient + + err := addPubKeys(&joseRecipients, ec.Parameters["pubkeys"]) + if err != nil { + return nil, err + } + // no recipients is not an error... + if len(joseRecipients) == 0 { + return nil, nil + } + + encrypter, err := jose.NewMultiEncrypter(jose.A256GCM, joseRecipients, nil) + if err != nil { + return nil, fmt.Errorf("jose.NewMultiEncrypter failed: %w", err) + } + jwe, err := encrypter.Encrypt(optsData) + if err != nil { + return nil, fmt.Errorf("JWE Encrypt failed: %w", err) + } + return []byte(jwe.FullSerialize()), nil +} + +func (kw *jweKeyWrapper) UnwrapKey(dc *config.DecryptConfig, jweString []byte) ([]byte, error) { + // cf. list of algorithms in func addPubKeys() below + keyEncryptionAlgorithms := []jose.KeyAlgorithm{jose.RSA_OAEP, jose.RSA_OAEP_256, jose.ECDH_ES_A128KW, jose.ECDH_ES_A192KW, jose.ECDH_ES_A256KW} + // accept all algorithms defined in RFC 7518, section 5.1 + contentEncryption := []jose.ContentEncryption{jose.A128CBC_HS256, jose.A192CBC_HS384, jose.A256CBC_HS512, jose.A128GCM, jose.A192GCM, jose.A256GCM} + jwe, err := jose.ParseEncrypted(string(jweString), keyEncryptionAlgorithms, contentEncryption) + if err != nil { + return nil, errors.New("jose.ParseEncrypted failed") + } + + privKeys := kw.GetPrivateKeys(dc.Parameters) + if len(privKeys) == 0 { + return nil, errors.New("No private keys found for JWE decryption") + } + privKeysPasswords := kw.getPrivateKeysPasswords(dc.Parameters) + if len(privKeysPasswords) != len(privKeys) { + return nil, errors.New("Private key password array length must be same as that of private keys") + } + + for idx, privKey := range privKeys { + key, err := utils.ParsePrivateKey(privKey, privKeysPasswords[idx], "JWE") + if err != nil { + return nil, err + } + _, _, plain, err := jwe.DecryptMulti(key) + if err == nil { + return plain, nil + } + } + return nil, errors.New("JWE: No suitable private key found for decryption") +} + +func (kw *jweKeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool { + return len(kw.GetPrivateKeys(dcparameters)) == 0 +} + +func (kw *jweKeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte { + return dcparameters["privkeys"] +} + +func (kw *jweKeyWrapper) getPrivateKeysPasswords(dcparameters map[string][][]byte) [][]byte { + return dcparameters["privkeys-passwords"] +} + +func (kw *jweKeyWrapper) GetKeyIdsFromPacket(b64jwes string) ([]uint64, error) { + return nil, nil +} + +func (kw *jweKeyWrapper) GetRecipients(b64jwes string) ([]string, error) { + return []string{"[jwe]"}, nil +} + +func addPubKeys(joseRecipients *[]jose.Recipient, pubKeys [][]byte) error { + if len(pubKeys) == 0 { + return nil + } + for 
_, pubKey := range pubKeys { + key, err := utils.ParsePublicKey(pubKey, "JWE") + if err != nil { + return err + } + + alg := jose.RSA_OAEP + switch key := key.(type) { + case *ecdsa.PublicKey: + alg = jose.ECDH_ES_A256KW + case *jose.JSONWebKey: + if key.Algorithm != "" { + alg = jose.KeyAlgorithm(key.Algorithm) + switch alg { + /* accepted algorithms */ + case jose.RSA_OAEP: + case jose.RSA_OAEP_256: + case jose.ECDH_ES_A128KW: + case jose.ECDH_ES_A192KW: + case jose.ECDH_ES_A256KW: + /* all others are rejected */ + default: + return fmt.Errorf("%s is an unsupported JWE key algorithm", alg) + } + } + } + + *joseRecipients = append(*joseRecipients, jose.Recipient{ + Algorithm: alg, + Key: key, + }) + } + return nil +} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go b/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go new file mode 100644 index 0000000000..6ac0fcb95a --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go @@ -0,0 +1,242 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package keyprovider + +import ( + "context" + "encoding/json" + "errors" + "fmt" + + "github.com/containers/ocicrypt/config" + keyproviderconfig "github.com/containers/ocicrypt/config/keyprovider-config" + "github.com/containers/ocicrypt/keywrap" + "github.com/containers/ocicrypt/utils" + keyproviderpb "github.com/containers/ocicrypt/utils/keyprovider" + log "github.com/sirupsen/logrus" + "google.golang.org/grpc" +) + +type keyProviderKeyWrapper struct { + provider string + attrs keyproviderconfig.KeyProviderAttrs +} + +func (kw *keyProviderKeyWrapper) GetAnnotationID() string { + return "org.opencontainers.image.enc.keys.provider." + kw.provider +} + +// NewKeyWrapper returns a new key wrapping interface using keyprovider +func NewKeyWrapper(p string, a keyproviderconfig.KeyProviderAttrs) keywrap.KeyWrapper { + return &keyProviderKeyWrapper{provider: p, attrs: a} +} + +type KeyProviderKeyWrapProtocolOperation string + +var ( + OpKeyWrap KeyProviderKeyWrapProtocolOperation = "keywrap" + OpKeyUnwrap KeyProviderKeyWrapProtocolOperation = "keyunwrap" +) + +// KeyProviderKeyWrapProtocolInput defines the input to the key provider binary or grpc method. +type KeyProviderKeyWrapProtocolInput struct { + // Operation is either "keywrap" or "keyunwrap" + Operation KeyProviderKeyWrapProtocolOperation `json:"op"` + // KeyWrapParams encodes the arguments to key wrap if operation is set to wrap + KeyWrapParams KeyWrapParams `json:"keywrapparams,omitempty"` + // KeyUnwrapParams encodes the arguments to key unwrap if operation is set to unwrap + KeyUnwrapParams KeyUnwrapParams `json:"keyunwrapparams,omitempty"` +} + +// KeyProviderKeyWrapProtocolOutput defines the output of the key provider binary or grpc method. 
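These protocol types define a JSON exchange spoken over stdin/stdout with an external binary: ocicrypt execs the configured command, writes KeyProviderKeyWrapProtocolInput as JSON to its stdin, and parses its stdout as KeyProviderKeyWrapProtocolOutput (see getProviderCommandOutput and utils.Runner.Exec elsewhere in this change). A minimal, hypothetical provider command might look like the sketch below; the local struct definitions mirror the JSON tags of these types, and the base64 "wrapping" is a toy stand-in for a real KMS call.

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"os"
)

// Local mirrors of the protocol types, matching their JSON tags.
type protocolInput struct {
	Op            string `json:"op"`
	KeyWrapParams struct {
		OptsData []byte `json:"optsdata"`
	} `json:"keywrapparams,omitempty"`
	KeyUnwrapParams struct {
		Annotation []byte `json:"annotation"`
	} `json:"keyunwrapparams,omitempty"`
}

type protocolOutput struct {
	KeyWrapResults struct {
		Annotation []byte `json:"annotation"`
	} `json:"keywrapresults,omitempty"`
	KeyUnwrapResults struct {
		OptsData []byte `json:"optsdata"`
	} `json:"keyunwrapresults,omitempty"`
}

func main() {
	var in protocolInput
	if err := json.NewDecoder(os.Stdin).Decode(&in); err != nil {
		os.Exit(1)
	}
	var out protocolOutput
	switch in.Op {
	case "keywrap":
		// Toy "wrapping": base64 of the key options. A real provider would
		// encrypt optsData with a KMS-held key.
		out.KeyWrapResults.Annotation = []byte(base64.StdEncoding.EncodeToString(in.KeyWrapParams.OptsData))
	case "keyunwrap":
		b, err := base64.StdEncoding.DecodeString(string(in.KeyUnwrapParams.Annotation))
		if err != nil {
			os.Exit(1)
		}
		out.KeyUnwrapResults.OptsData = b
	default:
		os.Exit(1)
	}
	if err := json.NewEncoder(os.Stdout).Encode(&out); err != nil {
		os.Exit(1)
	}
}
```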
+type KeyProviderKeyWrapProtocolOutput struct { + // KeyWrapResult encodes the results to key wrap if operation is to wrap + KeyWrapResults KeyWrapResults `json:"keywrapresults,omitempty"` + // KeyUnwrapResult encodes the result to key unwrap if operation is to unwrap + KeyUnwrapResults KeyUnwrapResults `json:"keyunwrapresults,omitempty"` +} + +type KeyWrapParams struct { + Ec *config.EncryptConfig `json:"ec"` + OptsData []byte `json:"optsdata"` +} + +type KeyUnwrapParams struct { + Dc *config.DecryptConfig `json:"dc"` + Annotation []byte `json:"annotation"` +} + +type KeyUnwrapResults struct { + OptsData []byte `json:"optsdata"` +} + +type KeyWrapResults struct { + Annotation []byte `json:"annotation"` +} + +var runner utils.CommandExecuter + +func init() { + runner = utils.Runner{} +} + +// WrapKeys calls appropriate binary executable/grpc server for wrapping the session key for recipients and gets encrypted optsData, which +// describe the symmetric key used for encrypting the layer +func (kw *keyProviderKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) { + + input, err := json.Marshal(KeyProviderKeyWrapProtocolInput{ + Operation: OpKeyWrap, + KeyWrapParams: KeyWrapParams{ + Ec: ec, + OptsData: optsData, + }, + }) + + if err != nil { + return nil, err + } + + if _, ok := ec.Parameters[kw.provider]; ok { + if kw.attrs.Command != nil { + protocolOuput, err := getProviderCommandOutput(input, kw.attrs.Command) + if err != nil { + return nil, fmt.Errorf("error while retrieving keyprovider protocol command output: %w", err) + } + return protocolOuput.KeyWrapResults.Annotation, nil + } else if kw.attrs.Grpc != "" { + protocolOuput, err := getProviderGRPCOutput(input, kw.attrs.Grpc, OpKeyWrap) + if err != nil { + return nil, fmt.Errorf("error while retrieving keyprovider protocol grpc output: %w", err) + } + + return protocolOuput.KeyWrapResults.Annotation, nil + } + return nil, errors.New("Unsupported keyprovider invocation. Supported invocation methods are grpc and cmd") + } + + return nil, nil +} + +// UnwrapKey calls appropriate binary executable/grpc server for unwrapping the session key based on the protocol given in annotation for recipients and gets decrypted optsData, +// which describe the symmetric key used for decrypting the layer +func (kw *keyProviderKeyWrapper) UnwrapKey(dc *config.DecryptConfig, jsonString []byte) ([]byte, error) { + input, err := json.Marshal(KeyProviderKeyWrapProtocolInput{ + Operation: OpKeyUnwrap, + KeyUnwrapParams: KeyUnwrapParams{ + Dc: dc, + Annotation: jsonString, + }, + }) + if err != nil { + return nil, err + } + + if kw.attrs.Command != nil { + protocolOuput, err := getProviderCommandOutput(input, kw.attrs.Command) + if err != nil { + // If err is not nil, then ignore it and continue with rest of the given keyproviders + return nil, err + } + + return protocolOuput.KeyUnwrapResults.OptsData, nil + } else if kw.attrs.Grpc != "" { + protocolOuput, err := getProviderGRPCOutput(input, kw.attrs.Grpc, OpKeyUnwrap) + if err != nil { + // If err is not nil, then ignore it and continue with rest of the given keyproviders + return nil, err + } + + return protocolOuput.KeyUnwrapResults.OptsData, nil + } + return nil, errors.New("Unsupported keyprovider invocation. 
Supported invocation methods are grpc and cmd") +} + +func getProviderGRPCOutput(input []byte, connString string, operation KeyProviderKeyWrapProtocolOperation) (*KeyProviderKeyWrapProtocolOutput, error) { + var protocolOuput KeyProviderKeyWrapProtocolOutput + var grpcOutput *keyproviderpb.KeyProviderKeyWrapProtocolOutput + cc, err := grpc.Dial(connString, grpc.WithInsecure()) + if err != nil { + return nil, fmt.Errorf("error while dialing rpc server: %w", err) + } + defer func() { + derr := cc.Close() + if derr != nil { + log.WithError(derr).Error("Error closing grpc socket") + } + }() + + client := keyproviderpb.NewKeyProviderServiceClient(cc) + req := &keyproviderpb.KeyProviderKeyWrapProtocolInput{ + KeyProviderKeyWrapProtocolInput: input, + } + + if operation == OpKeyWrap { + grpcOutput, err = client.WrapKey(context.Background(), req) + if err != nil { + return nil, fmt.Errorf("Error from grpc method: %w", err) + } + } else if operation == OpKeyUnwrap { + grpcOutput, err = client.UnWrapKey(context.Background(), req) + if err != nil { + return nil, fmt.Errorf("Error from grpc method: %w", err) + } + } else { + return nil, errors.New("Unsupported operation") + } + + respBytes := grpcOutput.GetKeyProviderKeyWrapProtocolOutput() + err = json.Unmarshal(respBytes, &protocolOuput) + if err != nil { + return nil, fmt.Errorf("Error while unmarshalling grpc method output: %w", err) + } + + return &protocolOuput, nil +} + +func getProviderCommandOutput(input []byte, command *keyproviderconfig.Command) (*KeyProviderKeyWrapProtocolOutput, error) { + var protocolOuput KeyProviderKeyWrapProtocolOutput + // Convert interface to command structure + respBytes, err := runner.Exec(command.Path, command.Args, input) + if err != nil { + return nil, err + } + err = json.Unmarshal(respBytes, &protocolOuput) + if err != nil { + return nil, fmt.Errorf("Error while unmarshalling binary executable command output: %w", err) + } + return &protocolOuput, nil +} + +// Return false as it is not applicable to keyprovider protocol +func (kw *keyProviderKeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool { + return false +} + +// Return nil as it is not applicable to keyprovider protocol +func (kw *keyProviderKeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte { + return nil +} + +// Return nil as it is not applicable to keyprovider protocol +func (kw *keyProviderKeyWrapper) GetKeyIdsFromPacket(_ string) ([]uint64, error) { + return nil, nil +} + +// Return nil as it is not applicable to keyprovider protocol +func (kw *keyProviderKeyWrapper) GetRecipients(_ string) ([]string, error) { + return nil, nil +} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/keywrap.go b/vendor/github.com/containers/ocicrypt/keywrap/keywrap.go new file mode 100644 index 0000000000..ed25e7dac3 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/keywrap/keywrap.go @@ -0,0 +1,48 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package keywrap + +import ( + "github.com/containers/ocicrypt/config" +) + +// KeyWrapper is the interface used for wrapping keys using +// a specific encryption technology (pgp, jwe) +type KeyWrapper interface { + WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) + UnwrapKey(dc *config.DecryptConfig, annotation []byte) ([]byte, error) + GetAnnotationID() string + + // NoPossibleKeys returns true if there is no possibility of performing + // decryption for parameters provided. + NoPossibleKeys(dcparameters map[string][][]byte) bool + + // GetPrivateKeys (optional) gets the array of private keys. It is an optional implementation + // as in some key services, a private key may not be exportable (i.e. HSM) + // If not implemented, return nil + GetPrivateKeys(dcparameters map[string][][]byte) [][]byte + + // GetKeyIdsFromPacket (optional) gets a list of key IDs. This is optional as some encryption + // schemes may not have a notion of key IDs + // If not implemented, return the nil slice + GetKeyIdsFromPacket(packet string) ([]uint64, error) + + // GetRecipients (optional) gets a list of recipients. It is optional due to the validity of + // recipients in a particular encryptiong scheme + // If not implemented, return the nil slice + GetRecipients(packet string) ([]string, error) +} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go b/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go new file mode 100644 index 0000000000..4ab9bd9783 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go @@ -0,0 +1,272 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package pgp + +import ( + "bytes" + "crypto" + "crypto/rand" + "encoding/base64" + "errors" + "fmt" + "io" + "net/mail" + "strconv" + "strings" + + "github.com/containers/ocicrypt/config" + "github.com/containers/ocicrypt/keywrap" + "golang.org/x/crypto/openpgp" + "golang.org/x/crypto/openpgp/packet" +) + +type gpgKeyWrapper struct { +} + +// NewKeyWrapper returns a new key wrapping interface for pgp +func NewKeyWrapper() keywrap.KeyWrapper { + return &gpgKeyWrapper{} +} + +var ( + // GPGDefaultEncryptConfig is the default configuration for layer encryption/decryption + GPGDefaultEncryptConfig = &packet.Config{ + Rand: rand.Reader, + DefaultHash: crypto.SHA256, + DefaultCipher: packet.CipherAES256, + CompressionConfig: &packet.CompressionConfig{Level: 0}, // No compression + RSABits: 2048, + } +) + +func (kw *gpgKeyWrapper) GetAnnotationID() string { + return "org.opencontainers.image.enc.keys.pgp" +} + +// WrapKeys wraps the session key for recpients and encrypts the optsData, which +// describe the symmetric key used for encrypting the layer +func (kw *gpgKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) { + ciphertext := new(bytes.Buffer) + el, err := kw.createEntityList(ec) + if err != nil { + return nil, fmt.Errorf("unable to create entity list: %w", err) + } + if len(el) == 0 { + // nothing to do -- not an error + return nil, nil + } + + plaintextWriter, err := openpgp.Encrypt(ciphertext, + el, /*EntityList*/ + nil, /* Sign*/ + nil, /* FileHint */ + GPGDefaultEncryptConfig) + if err != nil { + return nil, err + } + + if _, err = plaintextWriter.Write(optsData); err != nil { + return nil, err + } else if err = plaintextWriter.Close(); err != nil { + return nil, err + } + return ciphertext.Bytes(), err +} + +// UnwrapKey unwraps the symmetric key with which the layer is encrypted +// This symmetric key is encrypted in the PGP payload. 
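Before the unwrap path, a hedged sketch of the wrap side's parameter shapes: the recipient must match an identity name or email address in the supplied public keyring, since createEntityList further down filters on both. The helper name and recipient address are illustrative.

```go
package sketch

import (
	"github.com/containers/ocicrypt/config"
	"github.com/containers/ocicrypt/keywrap/pgp"
)

// wrapForRecipient wraps the layer key options for one recipient. pubRing is
// a raw public keyring, for example from GPGClient.ReadGPGPubRingFile.
func wrapForRecipient(pubRing, optsData []byte) ([]byte, error) {
	ec := &config.EncryptConfig{
		Parameters: map[string][][]byte{
			"gpg-pubkeyringfile": {pubRing},
			"gpg-recipients":     {[]byte("alice@example.com")}, // invented recipient
		},
	}
	// The returned bytes are the OpenPGP-encrypted optsData; the caller
	// base64-encodes them into the org.opencontainers.image.enc.keys.pgp
	// annotation (see preWrapKeys in encryption.go).
	return pgp.NewKeyWrapper().WrapKeys(ec, optsData)
}
```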
+func (kw *gpgKeyWrapper) UnwrapKey(dc *config.DecryptConfig, pgpPacket []byte) ([]byte, error) { + pgpPrivateKeys, pgpPrivateKeysPwd, err := kw.getKeyParameters(dc.Parameters) + if err != nil { + return nil, err + } + + for idx, pgpPrivateKey := range pgpPrivateKeys { + r := bytes.NewBuffer(pgpPrivateKey) + entityList, err := openpgp.ReadKeyRing(r) + if err != nil { + return nil, fmt.Errorf("unable to parse private keys: %w", err) + } + + var prompt openpgp.PromptFunction + if len(pgpPrivateKeysPwd) > idx { + responded := false + prompt = func(keys []openpgp.Key, symmetric bool) ([]byte, error) { + if responded { + return nil, fmt.Errorf("don't seem to have the right password") + } + responded = true + for _, key := range keys { + if key.PrivateKey != nil { + _ = key.PrivateKey.Decrypt(pgpPrivateKeysPwd[idx]) + } + } + return pgpPrivateKeysPwd[idx], nil + } + } + + r = bytes.NewBuffer(pgpPacket) + md, err := openpgp.ReadMessage(r, entityList, prompt, GPGDefaultEncryptConfig) + if err != nil { + continue + } + // we get the plain key options back + optsData, err := io.ReadAll(md.UnverifiedBody) + if err != nil { + continue + } + return optsData, nil + } + return nil, errors.New("PGP: No suitable key found to unwrap key") +} + +// GetKeyIdsFromWrappedKeys converts the base64 encoded PGPPacket to uint64 keyIds +func (kw *gpgKeyWrapper) GetKeyIdsFromPacket(b64pgpPackets string) ([]uint64, error) { + + var keyids []uint64 + for _, b64pgpPacket := range strings.Split(b64pgpPackets, ",") { + pgpPacket, err := base64.StdEncoding.DecodeString(b64pgpPacket) + if err != nil { + return nil, fmt.Errorf("could not decode base64 encoded PGP packet: %w", err) + } + newids, err := kw.getKeyIDs(pgpPacket) + if err != nil { + return nil, err + } + keyids = append(keyids, newids...) 
+ } + return keyids, nil +} + +// getKeyIDs parses a PGPPacket and gets the list of recipients' key IDs +func (kw *gpgKeyWrapper) getKeyIDs(pgpPacket []byte) ([]uint64, error) { + var keyids []uint64 + + kbuf := bytes.NewBuffer(pgpPacket) + packets := packet.NewReader(kbuf) +ParsePackets: + for { + p, err := packets.Next() + if err == io.EOF { + break ParsePackets + } + if err != nil { + return []uint64{}, fmt.Errorf("packets.Next() failed: %w", err) + } + switch p := p.(type) { + case *packet.EncryptedKey: + keyids = append(keyids, p.KeyId) + case *packet.SymmetricallyEncrypted: + break ParsePackets + } + } + return keyids, nil +} + +// GetRecipients converts the wrappedKeys to an array of recipients +func (kw *gpgKeyWrapper) GetRecipients(b64pgpPackets string) ([]string, error) { + keyIds, err := kw.GetKeyIdsFromPacket(b64pgpPackets) + if err != nil { + return nil, err + } + var array []string + for _, keyid := range keyIds { + array = append(array, "0x"+strconv.FormatUint(keyid, 16)) + } + return array, nil +} + +func (kw *gpgKeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool { + return len(kw.GetPrivateKeys(dcparameters)) == 0 +} + +func (kw *gpgKeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte { + return dcparameters["gpg-privatekeys"] +} + +func (kw *gpgKeyWrapper) getKeyParameters(dcparameters map[string][][]byte) ([][]byte, [][]byte, error) { + + privKeys := kw.GetPrivateKeys(dcparameters) + if len(privKeys) == 0 { + return nil, nil, errors.New("GPG: Missing private key parameter") + } + + return privKeys, dcparameters["gpg-privatekeys-passwords"], nil +} + +// createEntityList creates the opengpg EntityList by reading the KeyRing +// first and then filtering out recipients' keys +func (kw *gpgKeyWrapper) createEntityList(ec *config.EncryptConfig) (openpgp.EntityList, error) { + pgpPubringFile := ec.Parameters["gpg-pubkeyringfile"] + if len(pgpPubringFile) == 0 { + return nil, nil + } + r := bytes.NewReader(pgpPubringFile[0]) + + entityList, err := openpgp.ReadKeyRing(r) + if err != nil { + return nil, err + } + + gpgRecipients := ec.Parameters["gpg-recipients"] + if len(gpgRecipients) == 0 { + return nil, nil + } + + rSet := make(map[string]int) + for _, r := range gpgRecipients { + rSet[string(r)] = 0 + } + + var filteredList openpgp.EntityList + for _, entity := range entityList { + for k := range entity.Identities { + addr, err := mail.ParseAddress(k) + if err != nil { + return nil, err + } + for _, r := range gpgRecipients { + recp := string(r) + if strings.Compare(addr.Name, recp) == 0 || strings.Compare(addr.Address, recp) == 0 { + filteredList = append(filteredList, entity) + rSet[recp] = rSet[recp] + 1 + } + } + } + } + + // make sure we found keys for all the Recipients... + var buffer bytes.Buffer + notFound := false + buffer.WriteString("PGP: No key found for the following recipients: ") + + for k, v := range rSet { + if v == 0 { + if notFound { + buffer.WriteString(", ") + } + buffer.WriteString(k) + notFound = true + } + } + + if notFound { + return nil, errors.New(buffer.String()) + } + + return filteredList, nil +} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go b/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go new file mode 100644 index 0000000000..b9a83c5360 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go @@ -0,0 +1,152 @@ +/* + Copyright The ocicrypt Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package pkcs11 + +import ( + "errors" + "fmt" + + "github.com/containers/ocicrypt/config" + "github.com/containers/ocicrypt/crypto/pkcs11" + "github.com/containers/ocicrypt/keywrap" + "github.com/containers/ocicrypt/utils" +) + +type pkcs11KeyWrapper struct { +} + +func (kw *pkcs11KeyWrapper) GetAnnotationID() string { + return "org.opencontainers.image.enc.keys.pkcs11" +} + +// NewKeyWrapper returns a new key wrapping interface using pkcs11 +func NewKeyWrapper() keywrap.KeyWrapper { + return &pkcs11KeyWrapper{} +} + +// WrapKeys wraps the session key for recpients and encrypts the optsData, which +// describe the symmetric key used for encrypting the layer +func (kw *pkcs11KeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) { + // append({}, ...) allocates a fresh backing array, and that's necessary to guarantee concurrent calls to WrapKeys (as in c/image/copy.Image) + // can't race writing to the same backing array. + pubKeys := append([][]byte{}, ec.Parameters["pkcs11-pubkeys"]...) // In Go 1.21, slices.Clone(ec.Parameters["pkcs11-pubkeys"]) + pubKeys = append(pubKeys, ec.Parameters["pkcs11-yamls"]...) + pkcs11Recipients, err := addPubKeys(&ec.DecryptConfig, pubKeys) + if err != nil { + return nil, err + } + // no recipients is not an error... 
+ if len(pkcs11Recipients) == 0 { + return nil, nil + } + + jsonString, err := pkcs11.EncryptMultiple(pkcs11Recipients, optsData) + if err != nil { + return nil, fmt.Errorf("PKCS11 EncryptMulitple failed: %w", err) + } + return jsonString, nil +} + +func (kw *pkcs11KeyWrapper) UnwrapKey(dc *config.DecryptConfig, jsonString []byte) ([]byte, error) { + var pkcs11PrivKeys []*pkcs11.Pkcs11KeyFileObject + + privKeys := kw.GetPrivateKeys(dc.Parameters) + if len(privKeys) == 0 { + return nil, errors.New("No private keys found for PKCS11 decryption") + } + + p11conf, err := p11confFromParameters(dc.Parameters) + if err != nil { + return nil, err + } + + for _, privKey := range privKeys { + key, err := utils.ParsePrivateKey(privKey, nil, "PKCS11") + if err != nil { + return nil, err + } + switch pkcs11PrivKey := key.(type) { + case *pkcs11.Pkcs11KeyFileObject: + if p11conf != nil { + pkcs11PrivKey.Uri.SetModuleDirectories(p11conf.ModuleDirectories) + pkcs11PrivKey.Uri.SetAllowedModulePaths(p11conf.AllowedModulePaths) + } + pkcs11PrivKeys = append(pkcs11PrivKeys, pkcs11PrivKey) + default: + continue + } + } + + plaintext, err := pkcs11.Decrypt(pkcs11PrivKeys, jsonString) + if err == nil { + return plaintext, nil + } + + return nil, fmt.Errorf("PKCS11: No suitable private key found for decryption: %w", err) +} + +func (kw *pkcs11KeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool { + return len(kw.GetPrivateKeys(dcparameters)) == 0 +} + +func (kw *pkcs11KeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte { + return dcparameters["pkcs11-yamls"] +} + +func (kw *pkcs11KeyWrapper) GetKeyIdsFromPacket(_ string) ([]uint64, error) { + return nil, nil +} + +func (kw *pkcs11KeyWrapper) GetRecipients(_ string) ([]string, error) { + return []string{"[pkcs11]"}, nil +} + +func addPubKeys(dc *config.DecryptConfig, pubKeys [][]byte) ([]interface{}, error) { + var pkcs11Keys []interface{} + + if len(pubKeys) == 0 { + return pkcs11Keys, nil + } + + p11conf, err := p11confFromParameters(dc.Parameters) + if err != nil { + return nil, err + } + + for _, pubKey := range pubKeys { + key, err := utils.ParsePublicKey(pubKey, "PKCS11") + if err != nil { + return nil, err + } + switch pkcs11PubKey := key.(type) { + case *pkcs11.Pkcs11KeyFileObject: + if p11conf != nil { + pkcs11PubKey.Uri.SetModuleDirectories(p11conf.ModuleDirectories) + pkcs11PubKey.Uri.SetAllowedModulePaths(p11conf.AllowedModulePaths) + } + } + pkcs11Keys = append(pkcs11Keys, key) + } + return pkcs11Keys, nil +} + +func p11confFromParameters(dcparameters map[string][][]byte) (*pkcs11.Pkcs11Config, error) { + if _, ok := dcparameters["pkcs11-config"]; ok { + return pkcs11.ParsePkcs11ConfigFile(dcparameters["pkcs11-config"][0]) + } + return nil, nil +} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go b/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go new file mode 100644 index 0000000000..7ca32fc802 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go @@ -0,0 +1,137 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package pkcs7 + +import ( + "crypto" + "crypto/x509" + "errors" + "fmt" + + "github.com/containers/ocicrypt/config" + "github.com/containers/ocicrypt/keywrap" + "github.com/containers/ocicrypt/utils" + "github.com/smallstep/pkcs7" +) + +type pkcs7KeyWrapper struct { +} + +// NewKeyWrapper returns a new key wrapping interface using jwe +func NewKeyWrapper() keywrap.KeyWrapper { + return &pkcs7KeyWrapper{} +} + +func (kw *pkcs7KeyWrapper) GetAnnotationID() string { + return "org.opencontainers.image.enc.keys.pkcs7" +} + +// WrapKeys wraps the session key for recpients and encrypts the optsData, which +// describe the symmetric key used for encrypting the layer +func (kw *pkcs7KeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) { + x509Certs, err := collectX509s(ec.Parameters["x509s"]) + if err != nil { + return nil, err + } + // no recipients is not an error... + if len(x509Certs) == 0 { + return nil, nil + } + + pkcs7.ContentEncryptionAlgorithm = pkcs7.EncryptionAlgorithmAES128GCM + return pkcs7.Encrypt(optsData, x509Certs) +} + +func collectX509s(x509s [][]byte) ([]*x509.Certificate, error) { + if len(x509s) == 0 { + return nil, nil + } + var x509Certs []*x509.Certificate + for _, x509 := range x509s { + x509Cert, err := utils.ParseCertificate(x509, "PKCS7") + if err != nil { + return nil, err + } + x509Certs = append(x509Certs, x509Cert) + } + return x509Certs, nil +} + +func (kw *pkcs7KeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool { + return len(kw.GetPrivateKeys(dcparameters)) == 0 +} + +func (kw *pkcs7KeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte { + return dcparameters["privkeys"] +} + +func (kw *pkcs7KeyWrapper) getPrivateKeysPasswords(dcparameters map[string][][]byte) [][]byte { + return dcparameters["privkeys-passwords"] +} + +// UnwrapKey unwraps the symmetric key with which the layer is encrypted +// This symmetric key is encrypted in the PKCS7 payload. 
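A hedged round-trip sketch of this wrapper's parameter conventions: unwrapping needs the x509 certificates as well as the private keys, and the password slice must be index-aligned with the private-key slice. The helper and PEM inputs are illustrative.

```go
package sketch

import (
	"github.com/containers/ocicrypt/config"
	"github.com/containers/ocicrypt/keywrap/pkcs7"
)

// roundTrip wraps and immediately unwraps optsData for a single recipient,
// whose certificate and private key are given in PEM form.
func roundTrip(certPEM, keyPEM, optsData []byte) ([]byte, error) {
	kw := pkcs7.NewKeyWrapper()

	ec := &config.EncryptConfig{
		Parameters: map[string][][]byte{"x509s": {certPEM}},
	}
	wrapped, err := kw.WrapKeys(ec, optsData)
	if err != nil {
		return nil, err
	}

	// The password slice entry is empty but must exist, one per private key.
	dc := &config.DecryptConfig{
		Parameters: map[string][][]byte{
			"x509s":              {certPEM},
			"privkeys":           {keyPEM},
			"privkeys-passwords": {[]byte("")},
		},
	}
	return kw.UnwrapKey(dc, wrapped)
}
```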
+func (kw *pkcs7KeyWrapper) UnwrapKey(dc *config.DecryptConfig, pkcs7Packet []byte) ([]byte, error) { + privKeys := kw.GetPrivateKeys(dc.Parameters) + if len(privKeys) == 0 { + return nil, errors.New("no private keys found for PKCS7 decryption") + } + privKeysPasswords := kw.getPrivateKeysPasswords(dc.Parameters) + if len(privKeysPasswords) != len(privKeys) { + return nil, errors.New("private key password array length must be same as that of private keys") + } + + x509Certs, err := collectX509s(dc.Parameters["x509s"]) + if err != nil { + return nil, err + } + if len(x509Certs) == 0 { + return nil, errors.New("no x509 certificates found needed for PKCS7 decryption") + } + + p7, err := pkcs7.Parse(pkcs7Packet) + if err != nil { + return nil, fmt.Errorf("could not parse PKCS7 packet: %w", err) + } + + for idx, privKey := range privKeys { + key, err := utils.ParsePrivateKey(privKey, privKeysPasswords[idx], "PKCS7") + if err != nil { + return nil, err + } + for _, x509Cert := range x509Certs { + optsData, err := p7.Decrypt(x509Cert, crypto.PrivateKey(key)) + if err != nil { + continue + } + return optsData, nil + } + } + return nil, errors.New("PKCS7: No suitable private key found for decryption") +} + +// GetKeyIdsFromWrappedKeys converts the base64 encoded Packet to uint64 keyIds; +// We cannot do this with pkcs7 +func (kw *pkcs7KeyWrapper) GetKeyIdsFromPacket(b64pkcs7Packets string) ([]uint64, error) { + return nil, nil +} + +// GetRecipients converts the wrappedKeys to an array of recipients +// We cannot do this with pkcs7 +func (kw *pkcs7KeyWrapper) GetRecipients(b64pkcs7Packets string) ([]string, error) { + return []string{"[pkcs7]"}, nil +} diff --git a/vendor/github.com/containers/ocicrypt/reader.go b/vendor/github.com/containers/ocicrypt/reader.go new file mode 100644 index 0000000000..a93eec8e91 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/reader.go @@ -0,0 +1,40 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ocicrypt + +import ( + "io" +) + +type readerAtReader struct { + r io.ReaderAt + off int64 +} + +// ReaderFromReaderAt takes an io.ReaderAt and returns an io.Reader +func ReaderFromReaderAt(r io.ReaderAt) io.Reader { + return &readerAtReader{ + r: r, + off: 0, + } +} + +func (rar *readerAtReader) Read(p []byte) (n int, err error) { + n, err = rar.r.ReadAt(p, rar.off) + rar.off += int64(n) + return n, err +} diff --git a/vendor/github.com/containers/ocicrypt/utils/delayedreader.go b/vendor/github.com/containers/ocicrypt/utils/delayedreader.go new file mode 100644 index 0000000000..3b939bdea8 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/utils/delayedreader.go @@ -0,0 +1,109 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package utils + +import ( + "io" +) + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +// DelayedReader wraps a io.Reader and allows a client to use the Reader +// interface. The DelayedReader holds back some buffer to the client +// so that it can report any error that occurred on the Reader it wraps +// early to the client while it may still have held some data back. +type DelayedReader struct { + reader io.Reader // Reader to Read() bytes from and delay them + err error // error that occurred on the reader + buffer []byte // delay buffer + bufbytes int // number of bytes in the delay buffer to give to Read(); on '0' we return 'EOF' to caller + bufoff int // offset in the delay buffer to give to Read() +} + +// NewDelayedReader wraps a io.Reader and allocates a delay buffer of bufsize bytes +func NewDelayedReader(reader io.Reader, bufsize uint) io.Reader { + return &DelayedReader{ + reader: reader, + buffer: make([]byte, bufsize), + } +} + +// Read implements the io.Reader interface +func (dr *DelayedReader) Read(p []byte) (int, error) { + if dr.err != nil && dr.err != io.EOF { + return 0, dr.err + } + + // if we are completely drained, return io.EOF + if dr.err == io.EOF && dr.bufbytes == 0 { + return 0, io.EOF + } + + // only at the beginning we fill our delay buffer in an extra step + if dr.bufbytes < len(dr.buffer) && dr.err == nil { + dr.bufbytes, dr.err = FillBuffer(dr.reader, dr.buffer) + if dr.err != nil && dr.err != io.EOF { + return 0, dr.err + } + } + // dr.err != nil means we have EOF and can drain the delay buffer + // otherwise we need to still read from the reader + + var tmpbuf []byte + tmpbufbytes := 0 + if dr.err == nil { + tmpbuf = make([]byte, len(p)) + tmpbufbytes, dr.err = FillBuffer(dr.reader, tmpbuf) + if dr.err != nil && dr.err != io.EOF { + return 0, dr.err + } + } + + // copy out of the delay buffer into 'p' + tocopy1 := min(len(p), dr.bufbytes) + c1 := copy(p[:tocopy1], dr.buffer[dr.bufoff:]) + dr.bufoff += c1 + dr.bufbytes -= c1 + + c2 := 0 + // can p still hold more data? + if c1 < len(p) { + // copy out of the tmpbuf into 'p' + c2 = copy(p[tocopy1:], tmpbuf[:tmpbufbytes]) + } + + // if tmpbuf holds data we need to hold onto, copy them + // into the delay buffer + if tmpbufbytes-c2 > 0 { + // left-shift the delay buffer and append the tmpbuf's remaining data + dr.buffer = dr.buffer[dr.bufoff : dr.bufoff+dr.bufbytes] + dr.buffer = append(dr.buffer, tmpbuf[c2:tmpbufbytes]...) + dr.bufoff = 0 + dr.bufbytes = len(dr.buffer) + } + + var err error + if dr.bufbytes == 0 { + err = io.EOF + } + return c1 + c2, err +} diff --git a/vendor/github.com/containers/ocicrypt/utils/ioutils.go b/vendor/github.com/containers/ocicrypt/utils/ioutils.go new file mode 100644 index 0000000000..c6265168a1 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/utils/ioutils.go @@ -0,0 +1,58 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package utils + +import ( + "bytes" + "fmt" + "io" + "os/exec" +) + +// FillBuffer fills the given buffer with as many bytes from the reader as possible. It returns +// EOF if an EOF was encountered or any other error. +func FillBuffer(reader io.Reader, buffer []byte) (int, error) { + n, err := io.ReadFull(reader, buffer) + if err == io.ErrUnexpectedEOF { + return n, io.EOF + } + return n, err +} + +// first argument is the command, like cat or echo, +// the second is the list of args to pass to it +type CommandExecuter interface { + Exec(string, []string, []byte) ([]byte, error) +} + +type Runner struct{} + +// ExecuteCommand is used to execute a linux command line command and return the output of the command with an error if it exists. +func (r Runner) Exec(cmdName string, args []string, input []byte) ([]byte, error) { + var out bytes.Buffer + var stderr bytes.Buffer + stdInputBuffer := bytes.NewBuffer(input) + cmd := exec.Command(cmdName, args...) + cmd.Stdin = stdInputBuffer + cmd.Stdout = &out + cmd.Stderr = &stderr + err := cmd.Run() + if err != nil { + return nil, fmt.Errorf("Error while running command: %s. stderr: %s: %w", cmdName, stderr.String(), err) + } + return out.Bytes(), nil +} diff --git a/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.pb.go b/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.pb.go new file mode 100644 index 0000000000..dc477d3cf8 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.pb.go @@ -0,0 +1,243 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: keyprovider.proto + +package keyprovider + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
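Stepping back briefly to the helpers vendored just above (NewDelayedReader in delayedreader.go, FillBuffer in ioutils.go): the delayed reader withholds up to bufsize bytes so an error on the wrapped reader can be reported before the final bytes are handed out. A hedged sketch of its use, with invented input data:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/containers/ocicrypt/utils"
)

func main() {
	src := bytes.NewReader([]byte("illustrative payload"))
	// Hold back up to 1024 bytes: an error on src surfaces to the consumer
	// before the withheld tail is released.
	dr := utils.NewDelayedReader(src, 1024)

	var dst bytes.Buffer
	if _, err := io.Copy(&dst, dr); err != nil {
		fmt.Println("copy failed:", err)
		return
	}
	fmt.Println(dst.String())
}
```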
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type KeyProviderKeyWrapProtocolInput struct { + KeyProviderKeyWrapProtocolInput []byte `protobuf:"bytes,1,opt,name=KeyProviderKeyWrapProtocolInput,proto3" json:"KeyProviderKeyWrapProtocolInput,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyProviderKeyWrapProtocolInput) Reset() { *m = KeyProviderKeyWrapProtocolInput{} } +func (m *KeyProviderKeyWrapProtocolInput) String() string { return proto.CompactTextString(m) } +func (*KeyProviderKeyWrapProtocolInput) ProtoMessage() {} +func (*KeyProviderKeyWrapProtocolInput) Descriptor() ([]byte, []int) { + return fileDescriptor_da74c8e785ad390c, []int{0} +} + +func (m *KeyProviderKeyWrapProtocolInput) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeyProviderKeyWrapProtocolInput.Unmarshal(m, b) +} +func (m *KeyProviderKeyWrapProtocolInput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyProviderKeyWrapProtocolInput.Marshal(b, m, deterministic) +} +func (m *KeyProviderKeyWrapProtocolInput) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyProviderKeyWrapProtocolInput.Merge(m, src) +} +func (m *KeyProviderKeyWrapProtocolInput) XXX_Size() int { + return xxx_messageInfo_KeyProviderKeyWrapProtocolInput.Size(m) +} +func (m *KeyProviderKeyWrapProtocolInput) XXX_DiscardUnknown() { + xxx_messageInfo_KeyProviderKeyWrapProtocolInput.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyProviderKeyWrapProtocolInput proto.InternalMessageInfo + +func (m *KeyProviderKeyWrapProtocolInput) GetKeyProviderKeyWrapProtocolInput() []byte { + if m != nil { + return m.KeyProviderKeyWrapProtocolInput + } + return nil +} + +type KeyProviderKeyWrapProtocolOutput struct { + KeyProviderKeyWrapProtocolOutput []byte `protobuf:"bytes,1,opt,name=KeyProviderKeyWrapProtocolOutput,proto3" json:"KeyProviderKeyWrapProtocolOutput,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyProviderKeyWrapProtocolOutput) Reset() { *m = KeyProviderKeyWrapProtocolOutput{} } +func (m *KeyProviderKeyWrapProtocolOutput) String() string { return proto.CompactTextString(m) } +func (*KeyProviderKeyWrapProtocolOutput) ProtoMessage() {} +func (*KeyProviderKeyWrapProtocolOutput) Descriptor() ([]byte, []int) { + return fileDescriptor_da74c8e785ad390c, []int{1} +} + +func (m *KeyProviderKeyWrapProtocolOutput) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.Unmarshal(m, b) +} +func (m *KeyProviderKeyWrapProtocolOutput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.Marshal(b, m, deterministic) +} +func (m *KeyProviderKeyWrapProtocolOutput) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.Merge(m, src) +} +func (m *KeyProviderKeyWrapProtocolOutput) XXX_Size() int { + return xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.Size(m) +} +func (m *KeyProviderKeyWrapProtocolOutput) XXX_DiscardUnknown() { + xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyProviderKeyWrapProtocolOutput proto.InternalMessageInfo + +func (m *KeyProviderKeyWrapProtocolOutput) GetKeyProviderKeyWrapProtocolOutput() []byte { + if m != nil { + return m.KeyProviderKeyWrapProtocolOutput + } + return nil +} + +func init() { + 
proto.RegisterType((*KeyProviderKeyWrapProtocolInput)(nil), "keyprovider.keyProviderKeyWrapProtocolInput") + proto.RegisterType((*KeyProviderKeyWrapProtocolOutput)(nil), "keyprovider.keyProviderKeyWrapProtocolOutput") +} + +func init() { + proto.RegisterFile("keyprovider.proto", fileDescriptor_da74c8e785ad390c) +} + +var fileDescriptor_da74c8e785ad390c = []byte{ + // 169 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0xcc, 0x4e, 0xad, 0x2c, + 0x28, 0xca, 0x2f, 0xcb, 0x4c, 0x49, 0x2d, 0xd2, 0x03, 0x32, 0x4a, 0xf2, 0x85, 0xb8, 0x91, 0x84, + 0x94, 0xb2, 0xb9, 0xe4, 0x81, 0xdc, 0x00, 0x28, 0xd7, 0x3b, 0xb5, 0x32, 0xbc, 0x28, 0xb1, 0x20, + 0x00, 0xa4, 0x2e, 0x39, 0x3f, 0xc7, 0x33, 0xaf, 0xa0, 0xb4, 0x44, 0xc8, 0x83, 0x4b, 0xde, 0x1b, + 0xbf, 0x12, 0x09, 0x46, 0x05, 0x46, 0x0d, 0x9e, 0x20, 0x42, 0xca, 0x94, 0xf2, 0xb8, 0x14, 0x70, + 0x5b, 0xe6, 0x5f, 0x5a, 0x02, 0xb2, 0xcd, 0x8b, 0x4b, 0xc1, 0x9b, 0x80, 0x1a, 0xa8, 0x75, 0x04, + 0xd5, 0x19, 0xbd, 0x62, 0xe4, 0x12, 0x42, 0x52, 0x14, 0x9c, 0x5a, 0x54, 0x96, 0x99, 0x9c, 0x2a, + 0x94, 0xc1, 0xc5, 0x0e, 0x52, 0x0c, 0x94, 0x11, 0xd2, 0xd1, 0x43, 0x0e, 0x1f, 0x02, 0x21, 0x21, + 0xa5, 0x4b, 0xa4, 0x6a, 0x88, 0xf5, 0x4a, 0x0c, 0x42, 0x59, 0x5c, 0x9c, 0xa1, 0x79, 0xf4, 0xb1, + 0xcb, 0x89, 0x37, 0x0a, 0x39, 0x62, 0x93, 0xd8, 0xc0, 0x91, 0x6d, 0x0c, 0x08, 0x00, 0x00, 0xff, + 0xff, 0x9a, 0x10, 0xcb, 0xf9, 0x01, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// KeyProviderServiceClient is the client API for KeyProviderService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type KeyProviderServiceClient interface { + WrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput, opts ...grpc.CallOption) (*KeyProviderKeyWrapProtocolOutput, error) + UnWrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput, opts ...grpc.CallOption) (*KeyProviderKeyWrapProtocolOutput, error) +} + +type keyProviderServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewKeyProviderServiceClient(cc grpc.ClientConnInterface) KeyProviderServiceClient { + return &keyProviderServiceClient{cc} +} + +func (c *keyProviderServiceClient) WrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput, opts ...grpc.CallOption) (*KeyProviderKeyWrapProtocolOutput, error) { + out := new(KeyProviderKeyWrapProtocolOutput) + err := c.cc.Invoke(ctx, "/keyprovider.KeyProviderService/WrapKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyProviderServiceClient) UnWrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput, opts ...grpc.CallOption) (*KeyProviderKeyWrapProtocolOutput, error) { + out := new(KeyProviderKeyWrapProtocolOutput) + err := c.cc.Invoke(ctx, "/keyprovider.KeyProviderService/UnWrapKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// KeyProviderServiceServer is the server API for KeyProviderService service. 
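For orientation, this is roughly how a consumer might dial an out-of-process key provider and invoke WrapKey over the generated client. The unix-socket path and the payload bytes are assumptions for illustration; the actual protocol message a provider expects is defined elsewhere in ocicrypt:

package main

import (
	"context"
	"log"

	"github.com/containers/ocicrypt/utils/keyprovider"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Placeholder socket path for a hypothetical key-provider daemon.
	conn, err := grpc.Dial("unix:///run/keyprovider.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := keyprovider.NewKeyProviderServiceClient(conn)
	out, err := client.WrapKey(context.Background(), &keyprovider.KeyProviderKeyWrapProtocolInput{
		KeyProviderKeyWrapProtocolInput: []byte(`{"op":"keywrap"}`), // provider-defined payload, illustrative only
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d bytes returned", len(out.GetKeyProviderKeyWrapProtocolOutput()))
}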
+type KeyProviderServiceServer interface { + WrapKey(context.Context, *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) + UnWrapKey(context.Context, *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) +} + +// UnimplementedKeyProviderServiceServer can be embedded to have forward compatible implementations. +type UnimplementedKeyProviderServiceServer struct { +} + +func (*UnimplementedKeyProviderServiceServer) WrapKey(ctx context.Context, req *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method WrapKey not implemented") +} +func (*UnimplementedKeyProviderServiceServer) UnWrapKey(ctx context.Context, req *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method UnWrapKey not implemented") +} + +func RegisterKeyProviderServiceServer(s *grpc.Server, srv KeyProviderServiceServer) { + s.RegisterService(&_KeyProviderService_serviceDesc, srv) +} + +func _KeyProviderService_WrapKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(KeyProviderKeyWrapProtocolInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyProviderServiceServer).WrapKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/keyprovider.KeyProviderService/WrapKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyProviderServiceServer).WrapKey(ctx, req.(*KeyProviderKeyWrapProtocolInput)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyProviderService_UnWrapKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(KeyProviderKeyWrapProtocolInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyProviderServiceServer).UnWrapKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/keyprovider.KeyProviderService/UnWrapKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyProviderServiceServer).UnWrapKey(ctx, req.(*KeyProviderKeyWrapProtocolInput)) + } + return interceptor(ctx, in, info, handler) +} + +var _KeyProviderService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "keyprovider.KeyProviderService", + HandlerType: (*KeyProviderServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "WrapKey", + Handler: _KeyProviderService_WrapKey_Handler, + }, + { + MethodName: "UnWrapKey", + Handler: _KeyProviderService_UnWrapKey_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "keyprovider.proto", +} diff --git a/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.proto b/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.proto new file mode 100644 index 0000000000..a71f0a5922 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package keyprovider; +option go_package = "keyprovider"; + +message keyProviderKeyWrapProtocolInput { + bytes KeyProviderKeyWrapProtocolInput = 1; +} + +message keyProviderKeyWrapProtocolOutput { + bytes KeyProviderKeyWrapProtocolOutput = 1; +} + +service KeyProviderService { + rpc 
WrapKey(keyProviderKeyWrapProtocolInput) returns (keyProviderKeyWrapProtocolOutput) {}; + rpc UnWrapKey(keyProviderKeyWrapProtocolInput) returns (keyProviderKeyWrapProtocolOutput) {}; +} \ No newline at end of file diff --git a/vendor/github.com/containers/ocicrypt/utils/testing.go b/vendor/github.com/containers/ocicrypt/utils/testing.go new file mode 100644 index 0000000000..050aa885e3 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/utils/testing.go @@ -0,0 +1,174 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package utils + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "time" +) + +// CreateRSAKey creates an RSA key +func CreateRSAKey(bits int) (*rsa.PrivateKey, error) { + key, err := rsa.GenerateKey(rand.Reader, bits) + if err != nil { + return nil, fmt.Errorf("rsa.GenerateKey failed: %w", err) + } + return key, nil +} + +// CreateECDSAKey creates an elliptic curve key for the given curve +func CreateECDSAKey(curve elliptic.Curve) (*ecdsa.PrivateKey, error) { + key, err := ecdsa.GenerateKey(curve, rand.Reader) + if err != nil { + return nil, fmt.Errorf("ecdsa.GenerateKey failed: %w", err) + } + return key, nil +} + +// CreateRSATestKey creates an RSA key of the given size and returns +// the public and private key in PEM or DER format +func CreateRSATestKey(bits int, password []byte, pemencode bool) ([]byte, []byte, error) { + key, err := CreateRSAKey(bits) + if err != nil { + return nil, nil, err + } + + pubData, err := x509.MarshalPKIXPublicKey(&key.PublicKey) + if err != nil { + return nil, nil, fmt.Errorf("x509.MarshalPKIXPublicKey failed: %w", err) + } + privData := x509.MarshalPKCS1PrivateKey(key) + + // no more encoding needed for DER + if !pemencode { + return pubData, privData, nil + } + + publicKey := pem.EncodeToMemory(&pem.Block{ + Type: "PUBLIC KEY", + Bytes: pubData, + }) + + var block *pem.Block + + typ := "RSA PRIVATE KEY" + if len(password) > 0 { + block, err = x509.EncryptPEMBlock(rand.Reader, typ, privData, password, x509.PEMCipherAES256) //nolint:staticcheck // ignore SA1019, which is kept for backward compatibility + if err != nil { + return nil, nil, fmt.Errorf("x509.EncryptPEMBlock failed: %w", err) + } + } else { + block = &pem.Block{ + Type: typ, + Bytes: privData, + } + } + + privateKey := pem.EncodeToMemory(block) + + return publicKey, privateKey, nil +} + +// CreateECDSATestKey creates and elliptic curve key for the given curve and returns +// the public and private key in DER format +func CreateECDSATestKey(curve elliptic.Curve) ([]byte, []byte, error) { + key, err := CreateECDSAKey(curve) + if err != nil { + return nil, nil, err + } + + pubData, err := x509.MarshalPKIXPublicKey(&key.PublicKey) + if err != nil { + return nil, nil, fmt.Errorf("x509.MarshalPKIXPublicKey failed: %w", err) + } + + privData, err := x509.MarshalECPrivateKey(key) + if err != nil { + return nil, nil, 
fmt.Errorf("x509.MarshalECPrivateKey failed: %w", err) + } + + return pubData, privData, nil +} + +// CreateTestCA creates a root CA for testing +func CreateTestCA() (*rsa.PrivateKey, *x509.Certificate, error) { + key, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return nil, nil, fmt.Errorf("rsa.GenerateKey failed: %w", err) + } + + ca := &x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{ + CommonName: "test-ca", + }, + NotBefore: time.Now(), + NotAfter: time.Now().AddDate(1, 0, 0), + IsCA: true, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + } + caCert, err := certifyKey(&key.PublicKey, ca, key, ca) + + return key, caCert, err +} + +// CertifyKey certifies a public key using the given CA's private key and cert; +// The certificate template for the public key is optional +func CertifyKey(pubbytes []byte, template *x509.Certificate, caKey *rsa.PrivateKey, caCert *x509.Certificate) (*x509.Certificate, error) { + pubKey, err := ParsePublicKey(pubbytes, "CertifyKey") + if err != nil { + return nil, err + } + return certifyKey(pubKey, template, caKey, caCert) +} + +func certifyKey(pub interface{}, template *x509.Certificate, caKey *rsa.PrivateKey, caCert *x509.Certificate) (*x509.Certificate, error) { + if template == nil { + template = &x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{ + CommonName: "testkey", + }, + NotBefore: time.Now(), + NotAfter: time.Now().Add(time.Hour), + IsCA: false, + KeyUsage: x509.KeyUsageDigitalSignature, + BasicConstraintsValid: true, + } + } + + certDER, err := x509.CreateCertificate(rand.Reader, template, caCert, pub, caKey) + if err != nil { + return nil, fmt.Errorf("x509.CreateCertificate failed: %w", err) + } + + cert, err := x509.ParseCertificate(certDER) + if err != nil { + return nil, fmt.Errorf("x509.ParseCertificate failed: %w", err) + } + + return cert, nil +} diff --git a/vendor/github.com/containers/ocicrypt/utils/utils.go b/vendor/github.com/containers/ocicrypt/utils/utils.go new file mode 100644 index 0000000000..f653f2efcb --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/utils/utils.go @@ -0,0 +1,249 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package utils + +import ( + "bytes" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "strings" + + "github.com/containers/ocicrypt/crypto/pkcs11" + "github.com/go-jose/go-jose/v4" + "golang.org/x/crypto/openpgp" +) + +// parseJWKPrivateKey parses the input byte array as a JWK and makes sure it's a private key +func parseJWKPrivateKey(privKey []byte, prefix string) (interface{}, error) { + jwk := jose.JSONWebKey{} + err := jwk.UnmarshalJSON(privKey) + if err != nil { + return nil, fmt.Errorf("%s: Could not parse input as JWK: %w", prefix, err) + } + if jwk.IsPublic() { + return nil, fmt.Errorf("%s: JWK is not a private key", prefix) + } + return &jwk, nil +} + +// parseJWKPublicKey parses the input byte array as a JWK +func parseJWKPublicKey(privKey []byte, prefix string) (interface{}, error) { + jwk := jose.JSONWebKey{} + err := jwk.UnmarshalJSON(privKey) + if err != nil { + return nil, fmt.Errorf("%s: Could not parse input as JWK: %w", prefix, err) + } + if !jwk.IsPublic() { + return nil, fmt.Errorf("%s: JWK is not a public key", prefix) + } + return &jwk, nil +} + +// parsePkcs11PrivateKeyYaml parses the input byte array as pkcs11 key file yaml format) +func parsePkcs11PrivateKeyYaml(yaml []byte, prefix string) (*pkcs11.Pkcs11KeyFileObject, error) { + // if the URI does not have enough attributes, we will throw an error when decrypting + return pkcs11.ParsePkcs11KeyFile(yaml) +} + +// parsePkcs11URIPublicKey parses the input byte array as a pkcs11 key file yaml +func parsePkcs11PublicKeyYaml(yaml []byte, prefix string) (*pkcs11.Pkcs11KeyFileObject, error) { + // if the URI does not have enough attributes, we will throw an error when decrypting + return pkcs11.ParsePkcs11KeyFile(yaml) +} + +// IsPasswordError checks whether an error is related to a missing or wrong +// password +func IsPasswordError(err error) bool { + if err == nil { + return false + } + msg := strings.ToLower(err.Error()) + + return strings.Contains(msg, "password") && + (strings.Contains(msg, "missing") || strings.Contains(msg, "wrong")) +} + +// ParsePrivateKey tries to parse a private key in DER format first and +// PEM format after, returning an error if the parsing failed +func ParsePrivateKey(privKey, privKeyPassword []byte, prefix string) (interface{}, error) { + key, err := x509.ParsePKCS8PrivateKey(privKey) + if err != nil { + key, err = x509.ParsePKCS1PrivateKey(privKey) + if err != nil { + key, err = x509.ParseECPrivateKey(privKey) + } + } + if err != nil { + block, _ := pem.Decode(privKey) + if block != nil { + var der []byte + if x509.IsEncryptedPEMBlock(block) { //nolint:staticcheck // ignore SA1019, which is kept for backward compatibility + if privKeyPassword == nil { + return nil, fmt.Errorf("%s: Missing password for encrypted private key", prefix) + } + der, err = x509.DecryptPEMBlock(block, privKeyPassword) //nolint:staticcheck // ignore SA1019, which is kept for backward compatibility + if err != nil { + return nil, fmt.Errorf("%s: Wrong password: could not decrypt private key", prefix) + } + } else { + der = block.Bytes + } + + key, err = x509.ParsePKCS8PrivateKey(der) + if err != nil { + key, err = x509.ParsePKCS1PrivateKey(der) + if err != nil { + return nil, fmt.Errorf("%s: Could not parse private key: %w", prefix, err) + } + } + } else { + key, err = parseJWKPrivateKey(privKey, prefix) + if err != nil { + key, err = parsePkcs11PrivateKeyYaml(privKey, prefix) + } + } + } + return key, err +} + +// IsPrivateKey returns true in case the given byte array represents a 
private key +// It returns an error if for example the password is wrong +func IsPrivateKey(data []byte, password []byte) (bool, error) { + _, err := ParsePrivateKey(data, password, "") + return err == nil, err +} + +// IsPkcs11PrivateKey returns true in case the given byte array represents a pkcs11 private key +func IsPkcs11PrivateKey(data []byte) bool { + return pkcs11.IsPkcs11PrivateKey(data) +} + +// ParsePublicKey tries to parse a public key in DER format first and +// PEM format after, returning an error if the parsing failed +func ParsePublicKey(pubKey []byte, prefix string) (interface{}, error) { + key, err := x509.ParsePKIXPublicKey(pubKey) + if err != nil { + block, _ := pem.Decode(pubKey) + if block != nil { + key, err = x509.ParsePKIXPublicKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("%s: Could not parse public key: %w", prefix, err) + } + } else { + key, err = parseJWKPublicKey(pubKey, prefix) + if err != nil { + key, err = parsePkcs11PublicKeyYaml(pubKey, prefix) + } + } + } + return key, err +} + +// IsPublicKey returns true in case the given byte array represents a public key +func IsPublicKey(data []byte) bool { + _, err := ParsePublicKey(data, "") + return err == nil +} + +// IsPkcs11PublicKey returns true in case the given byte array represents a pkcs11 public key +func IsPkcs11PublicKey(data []byte) bool { + return pkcs11.IsPkcs11PublicKey(data) +} + +// ParseCertificate tries to parse a public key in DER format first and +// PEM format after, returning an error if the parsing failed +func ParseCertificate(certBytes []byte, prefix string) (*x509.Certificate, error) { + x509Cert, err := x509.ParseCertificate(certBytes) + if err != nil { + block, _ := pem.Decode(certBytes) + if block == nil { + return nil, fmt.Errorf("%s: Could not PEM decode x509 certificate", prefix) + } + x509Cert, err = x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, fmt.Errorf("%s: Could not parse x509 certificate: %w", prefix, err) + } + } + return x509Cert, err +} + +// IsCertificate returns true in case the given byte array represents an x.509 certificate +func IsCertificate(data []byte) bool { + _, err := ParseCertificate(data, "") + return err == nil +} + +// IsGPGPrivateKeyRing returns true in case the given byte array represents a GPG private key ring file +func IsGPGPrivateKeyRing(data []byte) bool { + r := bytes.NewBuffer(data) + _, err := openpgp.ReadKeyRing(r) + return err == nil +} + +// SortDecryptionKeys parses a list of comma separated base64 entries and sorts the data into +// a map. 
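To make the expected input format concrete before the implementation below: each comma-separated item is base64 data, optionally followed by a colon and a base64-encoded password. A sketch built on the CreateRSATestKey helper from testing.go above (the password is a placeholder):

package main

import (
	"encoding/base64"
	"log"

	"github.com/containers/ocicrypt/utils"
)

func main() {
	// Generate a password-protected PEM private key for the demo.
	_, privPEM, err := utils.CreateRSATestKey(2048, []byte("secret"), true)
	if err != nil {
		log.Fatal(err)
	}
	item := base64.StdEncoding.EncodeToString(privPEM) + ":" +
		base64.StdEncoding.EncodeToString([]byte("secret"))

	dc, err := utils.SortDecryptionKeys(item)
	if err != nil {
		log.Fatal(err)
	}
	// The key lands under "privkeys", its password under "privkeys-passwords".
	log.Printf("privkeys=%d passwords=%d", len(dc["privkeys"]), len(dc["privkeys-passwords"]))
}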
Each entry in the list may be either a GPG private key ring, private key, or x.509 +// certificate +func SortDecryptionKeys(b64ItemList string) (map[string][][]byte, error) { + dcparameters := make(map[string][][]byte) + + for _, b64Item := range strings.Split(b64ItemList, ",") { + var password []byte + b64Data := strings.Split(b64Item, ":") + keyData, err := base64.StdEncoding.DecodeString(b64Data[0]) + if err != nil { + return nil, errors.New("Could not base64 decode a passed decryption key") + } + if len(b64Data) == 2 { + password, err = base64.StdEncoding.DecodeString(b64Data[1]) + if err != nil { + return nil, errors.New("Could not base64 decode a passed decryption key password") + } + } + var key string + isPrivKey, err := IsPrivateKey(keyData, password) + if IsPasswordError(err) { + return nil, err + } + if isPrivKey { + key = "privkeys" + if _, ok := dcparameters["privkeys-passwords"]; !ok { + dcparameters["privkeys-passwords"] = [][]byte{password} + } else { + dcparameters["privkeys-passwords"] = append(dcparameters["privkeys-passwords"], password) + } + } else if IsCertificate(keyData) { + key = "x509s" + } else if IsGPGPrivateKeyRing(keyData) { + key = "gpg-privatekeys" + } + if key != "" { + values := dcparameters[key] + if values == nil { + dcparameters[key] = [][]byte{keyData} + } else { + dcparameters[key] = append(dcparameters[key], keyData) + } + } else { + return nil, errors.New("Unknown decryption key type") + } + } + + return dcparameters, nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/README.md b/vendor/github.com/containers/storage/pkg/archive/README.md new file mode 100644 index 0000000000..7307d9694f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/README.md @@ -0,0 +1 @@ +This code provides helper functions for dealing with archive files. diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go new file mode 100644 index 0000000000..aee06d71c7 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -0,0 +1,1628 @@ +package archive + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/bzip2" + "errors" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/containers/storage/pkg/fileutils" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/pools" + "github.com/containers/storage/pkg/promise" + "github.com/containers/storage/pkg/system" + "github.com/containers/storage/pkg/unshare" + gzip "github.com/klauspost/pgzip" + "github.com/sirupsen/logrus" + "github.com/ulikunitz/xz" +) + +type ( + // Compression is the state represents if compressed or not. + Compression int + // WhiteoutFormat is the format of whiteouts unpacked + WhiteoutFormat int + + // TarOptions wraps the tar options. + TarOptions struct { + IncludeFiles []string + ExcludePatterns []string + Compression Compression + NoLchown bool + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + IgnoreChownErrors bool + ChownOpts *idtools.IDPair + IncludeSourceDir bool + // WhiteoutFormat is the expected on disk format for whiteout files. + // This format will be converted to the standard format on pack + // and from the standard format on unpack. + WhiteoutFormat WhiteoutFormat + // This is additional data to be used by the converter. 
It will + // not survive a round trip through JSON, so it's primarily + // intended for generating archives (i.e., converting writes). + WhiteoutData any + // When unpacking, specifies whether overwriting a directory with a + // non-directory is allowed and vice versa. + NoOverwriteDirNonDir bool + // For each include when creating an archive, the included name will be + // replaced with the matching name from this map. + RebaseNames map[string]string + InUserNS bool + // CopyPass indicates that the contents of any archive we're creating + // will instantly be extracted and written to disk, so we can deviate + // from the traditional behavior/format to get features like subsecond + // precision in timestamps. + CopyPass bool + // ForceMask, if set, indicates the permission mask used for created files. + ForceMask *os.FileMode + // Timestamp, if set, will be set in each header as create/mod/access time + Timestamp *time.Time + } +) + +const PaxSchilyXattr = "SCHILY.xattr." + +const ( + tarExt = "tar" + solaris = "solaris" + windows = "windows" + darwin = "darwin" + freebsd = "freebsd" +) + +var xattrsToIgnore = map[string]any{ + "security.selinux": true, +} + +// Archiver allows the reuse of most utility functions of this package with a +// pluggable Untar function. To facilitate the passing of specific id mappings +// for untar, an archiver can be created with maps which will then be passed to +// Untar operations. If ChownOpts is set, its values are mapped using +// UntarIDMappings before being used to create files and directories on disk. +type Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + TarIDMappings *idtools.IDMappings + ChownOpts *idtools.IDPair + UntarIDMappings *idtools.IDMappings +} + +// NewDefaultArchiver returns a new Archiver without any IDMappings +func NewDefaultArchiver() *Archiver { + return &Archiver{Untar: Untar, TarIDMappings: &idtools.IDMappings{}, UntarIDMappings: &idtools.IDMappings{}} +} + +// breakoutError is used to differentiate errors related to breaking out +// When testing archive breakout in the unit tests, this error is expected +// in order for the test to pass. +type breakoutError error + +// overwriteError is used to differentiate errors related to attempting to +// overwrite a directory with a non-directory or vice-versa. When testing +// copying a file over a directory, this error is expected in order for the +// test to pass. +type overwriteError error + +const ( + // Uncompressed represents the uncompressed. + Uncompressed Compression = iota + // Bzip2 is bzip2 compression algorithm. + Bzip2 + // Gzip is gzip compression algorithm. + Gzip + // Xz is xz compression algorithm. + Xz + // Zstd is zstd compression algorithm. + Zstd +) + +const ( + // AUFSWhiteoutFormat is the default format for whiteouts + AUFSWhiteoutFormat WhiteoutFormat = iota + // OverlayWhiteoutFormat formats whiteout according to the overlay + // standard. + OverlayWhiteoutFormat +) + +// IsArchivePath checks if the (possibly compressed) file at the given path +// starts with a tar file header. +func IsArchivePath(path string) bool { + file, err := os.Open(path) + if err != nil { + return false + } + defer file.Close() + rdr, err := DecompressStream(file) + if err != nil { + return false + } + defer rdr.Close() + r := tar.NewReader(rdr) + _, err = r.Next() + return err == nil +} + +// DetectCompression detects the compression algorithm of the source. 
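The check below is a plain magic-byte prefix match, so a caller only needs the first few bytes of a stream. For instance (a minimal sketch; 0x1F 0x8B 0x08 is the standard gzip magic):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	// Only the leading bytes matter; anything unrecognized is Uncompressed.
	if archive.DetectCompression([]byte{0x1f, 0x8b, 0x08, 0x00}) == archive.Gzip {
		fmt.Println("gzip")
	}
}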
+func DetectCompression(source []byte) Compression { + for compression, m := range map[Compression][]byte{ + Bzip2: {0x42, 0x5A, 0x68}, + Gzip: {0x1F, 0x8B, 0x08}, + Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, + Zstd: {0x28, 0xb5, 0x2f, 0xfd}, + } { + if len(source) < len(m) { + logrus.Debug("Len too short") + continue + } + if bytes.Equal(m, source[:len(m)]) { + return compression + } + } + return Uncompressed +} + +// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. +func DecompressStream(archive io.Reader) (_ io.ReadCloser, Err error) { + p := pools.BufioReader32KPool + buf := p.Get(archive) + bs, err := buf.Peek(10) + + defer func() { + if Err != nil { + // In the normal case, the buffer is embedded in the ReadCloser return. + p.Put(buf) + } + }() + + if err != nil && err != io.EOF { + // Note: we'll ignore any io.EOF error because there are some odd + // cases where the layer.tar file will be empty (zero bytes) and + // that results in an io.EOF from the Peek() call. So, in those + // cases we'll just treat it as a non-compressed stream and + // that means just create an empty layer. + // See Issue 18170 + return nil, err + } + + compression := DetectCompression(bs) + switch compression { + case Uncompressed: + readBufWrapper := p.NewReadCloserWrapper(buf, buf) + return readBufWrapper, nil + case Gzip: + cleanup := func() { + p.Put(buf) + } + if rc, canUse := tryProcFilter([]string{"pigz", "-d"}, buf, cleanup); canUse { + return rc, nil + } + gzReader, err := gzip.NewReader(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) + return readBufWrapper, nil + case Bzip2: + bz2Reader := bzip2.NewReader(buf) + readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) + return readBufWrapper, nil + case Xz: + xzReader, err := xz.NewReader(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) + return readBufWrapper, nil + case Zstd: + cleanup := func() { + p.Put(buf) + } + if rc, canUse := tryProcFilter([]string{"zstd", "-d"}, buf, cleanup); canUse { + return rc, nil + } + return zstdReader(buf) + default: + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) + } +} + +// CompressStream compresses the dest with specified compression algorithm. +func CompressStream(dest io.Writer, compression Compression) (_ io.WriteCloser, Err error) { + p := pools.BufioWriter32KPool + buf := p.Get(dest) + + defer func() { + if Err != nil { + p.Put(buf) + } + }() + + switch compression { + case Uncompressed: + writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) + return writeBufWrapper, nil + case Gzip: + gzWriter := gzip.NewWriter(dest) + writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) + return writeBufWrapper, nil + case Zstd: + return zstdWriter(dest) + case Bzip2, Xz: + // archive/bzip2 does not support writing, and there is no xz support at all + // However, this is not a problem as docker only currently generates gzipped tars + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) + default: + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) + } +} + +// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to +// modify the contents or header of an entry in the archive. If the file already +// exists in the archive the TarModifierFunc will be called with the Header and +// a reader which will return the files content. 
If the file does not exist both +// header and content will be nil. +type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) + +// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the +// tar stream are modified if they match any of the keys in mods. +func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + defer inputTarStream.Close() + defer tarWriter.Close() + + modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { + header, data, err := modifier(name, original, tarReader) + switch { + case err != nil: + return err + case header == nil: + return nil + } + + header.Name = name + header.Size = int64(len(data)) + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + if len(data) != 0 { + if _, err := tarWriter.Write(data); err != nil { + return err + } + } + return nil + } + + var err error + var originalHeader *tar.Header + for { + originalHeader, err = tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + modifier, ok := mods[originalHeader.Name] + if !ok { + // No modifiers for this file, copy the header and data + if err := tarWriter.WriteHeader(originalHeader); err != nil { + pipeWriter.CloseWithError(err) + return + } + if _, err := pools.Copy(tarWriter, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + continue + } + delete(mods, originalHeader.Name) + + if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + // Apply the modifiers that haven't matched any files in the archive + for name, modifier := range mods { + if err := modify(name, nil, modifier, nil); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + pipeWriter.Close() + }() + return pipeReader +} + +// Extension returns the extension of a file that uses the specified compression algorithm. +func (compression *Compression) Extension() string { + switch *compression { + case Uncompressed: + return tarExt + case Bzip2: + return tarExt + ".bz2" + case Gzip: + return tarExt + ".gz" + case Xz: + return tarExt + ".xz" + case Zstd: + return tarExt + ".zst" + } + return "" +} + +// nosysFileInfo hides the system-dependent info of the wrapped FileInfo to +// prevent tar.FileInfoHeader from introspecting it and potentially calling into +// glibc. +type nosysFileInfo struct { + os.FileInfo +} + +func (fi nosysFileInfo) Sys() any { + // A Sys value of type *tar.Header is safe as it is system-independent. + // The tar.FileInfoHeader function copies the fields into the returned + // header without performing any OS lookups. + if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok { + return sys + } + return nil +} + +// sysStatOverride, if non-nil, populates hdr from system-dependent fields of fi. +var sysStatOverride func(fi os.FileInfo, hdr *tar.Header) error + +func fileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) { + if sysStatOverride == nil { + return tar.FileInfoHeader(fi, link) + } + hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link) + if err != nil { + return nil, err + } + return hdr, sysStatOverride(fi, hdr) +} + +// FileInfoHeader creates a populated Header from fi. 
+// Compared to the archive package, this function fills in more information.
+// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR),
+// which archive/tar has no longer set since Go 1.9.
+func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
+	hdr, err := fileInfoHeaderNoLookups(fi, link)
+	if err != nil {
+		return nil, err
+	}
+	hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+	name, err = canonicalTarName(name, fi.IsDir())
+	if err != nil {
+		return nil, fmt.Errorf("tar: cannot canonicalize path: %w", err)
+	}
+	hdr.Name = name
+	if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil {
+		return nil, err
+	}
+	return hdr, nil
+}
+
+// readSecurityXattrToTarHeader reads the security.capability and security.ima
+// xattrs from the filesystem into a tar header
+func readSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
+	if hdr.PAXRecords == nil {
+		hdr.PAXRecords = make(map[string]string)
+	}
+	for _, xattr := range []string{"security.capability", "security.ima"} {
+		capability, err := system.Lgetxattr(path, xattr)
+		if err != nil && !errors.Is(err, system.ENOTSUP) && err != system.ErrNotSupportedPlatform {
+			return fmt.Errorf("failed to read %q attribute from %q: %w", xattr, path, err)
+		}
+		if capability != nil {
+			hdr.PAXRecords[PaxSchilyXattr+xattr] = string(capability)
+		}
+	}
+	return nil
+}
+
+// readUserXattrToTarHeader reads user.* xattrs from the filesystem into a tar header
+func readUserXattrToTarHeader(path string, hdr *tar.Header) error {
+	xattrs, err := system.Llistxattr(path)
+	if err != nil && !errors.Is(err, system.ENOTSUP) && err != system.ErrNotSupportedPlatform {
+		return err
+	}
+	for _, key := range xattrs {
+		if strings.HasPrefix(key, "user.") && !strings.HasPrefix(key, "user.overlay.") {
+			value, err := system.Lgetxattr(path, key)
+			if err != nil {
+				if errors.Is(err, system.E2BIG) {
+					logrus.Errorf("archive: Skipping xattr for file %s since value is too big: %s", path, key)
+					continue
+				}
+				return err
+			}
+			if hdr.PAXRecords == nil {
+				hdr.PAXRecords = make(map[string]string)
+			}
+			hdr.PAXRecords[PaxSchilyXattr+key] = string(value)
+		}
+	}
+	return nil
+}
+
+type TarWhiteoutHandler interface {
+	Setxattr(path, name string, value []byte) error
+	Mknod(path string, mode uint32, dev int) error
+	Chown(path string, uid, gid int) error
+}
+
+type TarWhiteoutConverter interface {
+	ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error)
+	ConvertRead(*tar.Header, string) (bool, error)
+	ConvertReadWithHandler(*tar.Header, string, TarWhiteoutHandler) (bool, error)
+}
+
+type tarWriter struct {
+	TarWriter *tar.Writer
+	Buffer    *bufio.Writer
+
+	// for hardlink mapping
+	SeenFiles  map[uint64]string
+	IDMappings *idtools.IDMappings
+	ChownOpts  *idtools.IDPair
+
+	// For packing and unpacking whiteout files in the
+	// non-standard format. The whiteout files defined
+	// by the AUFS standard are used as the tar whiteout
+	// standard.
+	WhiteoutConverter TarWhiteoutConverter
+	// CopyPass indicates that the contents of any archive we're creating
+	// will instantly be extracted and written to disk, so we can deviate
+	// from the traditional behavior/format to get features like subsecond
+	// precision in timestamps.
+ CopyPass bool + + // Timestamp, if set, will be set in each header as create/mod/access time + Timestamp *time.Time +} + +func newTarWriter(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair, timestamp *time.Time) *tarWriter { + return &tarWriter{ + SeenFiles: make(map[uint64]string), + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + IDMappings: idMapping, + ChownOpts: chownOpts, + Timestamp: timestamp, + } +} + +// canonicalTarName provides a platform-independent and consistent posix-style +// path for files and directories to be archived regardless of the platform. +func canonicalTarName(name string, isDir bool) (string, error) { + name, err := CanonicalTarNameForPath(name) + if err != nil { + return "", err + } + + // suffix with '/' for directories + if isDir && !strings.HasSuffix(name, "/") { + name += "/" + } + return name, nil +} + +// addFile adds a file from `path` as `name` to the tar archive. +func (ta *tarWriter) addFile(path, name string) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + + var link string + if fi.Mode()&os.ModeSymlink != 0 { + var err error + link, err = os.Readlink(path) + if err != nil { + return err + } + } + if fi.Mode()&os.ModeSocket != 0 { + logrus.Infof("archive: skipping %q since it is a socket", path) + return nil + } + + hdr, err := FileInfoHeader(name, fi, link) + if err != nil { + return err + } + if err := readSecurityXattrToTarHeader(path, hdr); err != nil { + return err + } + if err := readUserXattrToTarHeader(path, hdr); err != nil { + return err + } + if err := ReadFileFlagsToTarHeader(path, hdr); err != nil { + return err + } + if ta.CopyPass { + copyPassHeader(hdr) + } + + // if it's not a directory and has more than 1 link, + // it's hard linked, so set the type flag accordingly + if !fi.IsDir() && hasHardlinks(fi) { + inode, err := getInodeFromStat(fi.Sys()) + if err != nil { + return err + } + // a link should have a name that it links too + // and that linked name should be first in the tar archive + if oldpath, ok := ta.SeenFiles[inode]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This Must be here for the writer math to add up! + } else { + ta.SeenFiles[inode] = name + } + } + + // handle re-mapping container ID mappings back to host ID mappings before + // writing tar headers/files. We skip whiteout files because they were written + // by the kernel and already have proper ownership relative to the host + if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IDMappings.Empty() { + fileIDPair, err := getFileUIDGID(fi.Sys()) + if err != nil { + return err + } + hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair) + if err != nil { + return err + } + } + + // explicitly override with ChownOpts + if ta.ChownOpts != nil { + hdr.Uid = ta.ChownOpts.UID + hdr.Gid = ta.ChownOpts.GID + // Don’t expose the user names from the local system; they probably don’t match the ta.ChownOpts value anyway, + // and they unnecessarily give recipients of the tar file potentially private data. 
+ hdr.Uname = "" + hdr.Gname = "" + } + + // if override timestamp set, replace all times with this + if ta.Timestamp != nil { + hdr.ModTime = *ta.Timestamp + hdr.AccessTime = *ta.Timestamp + hdr.ChangeTime = *ta.Timestamp + } + + maybeTruncateHeaderModTime(hdr) + + if ta.WhiteoutConverter != nil { + wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) + if err != nil { + return err + } + + // If a new whiteout file exists, write original hdr, then + // replace hdr with wo to be written after. Whiteouts should + // always be written after the original. Note the original + // hdr may have been updated to be a whiteout with returning + // a whiteout header + if wo != nil { + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + return fmt.Errorf("tar: cannot use whiteout for non-empty file") + } + hdr = wo + } + } + + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + file, err := os.Open(path) + if err != nil { + return err + } + + ta.Buffer.Reset(ta.TarWriter) + defer ta.Buffer.Reset(nil) + _, err = io.Copy(ta.Buffer, file) + file.Close() + if err != nil { + return err + } + err = ta.Buffer.Flush() + if err != nil { + return err + } + } + + return nil +} + +func extractTarFileEntry(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns, ignoreChownErrors bool, forceMask *os.FileMode, buffer []byte) error { + // hdr.Mode is in linux format, which we can use for sycalls, + // but for os.Foo() calls we need the mode converted to os.FileMode, + // so use hdrInfo.Mode() (they differ for e.g. setuid bits) + hdrInfo := hdr.FileInfo() + + typeFlag := hdr.Typeflag + mask := hdrInfo.Mode() + + // update also the implementation of ForceMask in pkg/chunked + if forceMask != nil { + mask = *forceMask + // If we have a forceMask, force the real type to either be a directory, + // a link, or a regular file. + if typeFlag != tar.TypeDir && typeFlag != tar.TypeSymlink && typeFlag != tar.TypeLink { + typeFlag = tar.TypeReg + } + } + + switch typeFlag { + case tar.TypeDir: + // Create directory unless it exists as a directory already. + // In that case we just want to merge the two + if fi, err := os.Lstat(path); err != nil || !fi.IsDir() { + if err := os.Mkdir(path, mask); err != nil { + return err + } + } + + case tar.TypeReg: + // Source is regular file. We use system.OpenFileSequential to use sequential + // file access to avoid depleting the standby list on Windows. 
+		// On Linux, this equates to a regular os.OpenFile
+		file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, mask)
+		if err != nil {
+			return err
+		}
+		if _, err := io.CopyBuffer(file, reader, buffer); err != nil {
+			file.Close()
+			return err
+		}
+		if err := file.Close(); err != nil {
+			return err
+		}
+
+	case tar.TypeBlock, tar.TypeChar:
+		if inUserns { // cannot create devices in a userns
+			logrus.Debugf("Tar: Can't create device %v while running in user namespace", path)
+			return nil
+		}
+		fallthrough
+	case tar.TypeFifo:
+		// Handle this in an OS-specific way
+		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+			return err
+		}
+
+	case tar.TypeLink:
+		targetPath := filepath.Join(extractDir, hdr.Linkname)
+		// check for hardlink breakout
+		if !strings.HasPrefix(targetPath, extractDir) {
+			return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
+		}
+		if err := handleLLink(targetPath, path); err != nil {
+			return err
+		}
+
+	case tar.TypeSymlink:
+		// path -> hdr.Linkname = targetPath
+		// e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
+		targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)
+
+		// the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
+		// that symlink would first have to be created, which would be caught earlier, at this very check:
+		if !strings.HasPrefix(targetPath, extractDir) {
+			return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
+		}
+		if err := os.Symlink(hdr.Linkname, path); err != nil {
+			return err
+		}
+
+	case tar.TypeXGlobalHeader:
+		logrus.Debug("PAX Global Extended Headers found and ignored")
+		return nil
+
+	default:
+		return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
+	}
+
+	// Lchown is not supported on Windows.
+	if Lchown && runtime.GOOS != windows {
+		if chownOpts == nil {
+			chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
+		}
+		err := idtools.SafeLchown(path, chownOpts.UID, chownOpts.GID)
+		if err != nil {
+			if ignoreChownErrors {
+				fmt.Fprintf(os.Stderr, "Chown error detected. Ignoring due to ignoreChownErrors flag: %v\n", err)
+			} else {
+				return err
+			}
+		}
+	}
+
+	// There is no LChmod, so ignore mode for symlink. Also, this
+	// must happen after chown, as that can modify the file mode
+	if err := handleLChmod(hdr, path, hdrInfo, forceMask); err != nil {
+		return err
+	}
+
+	aTime := hdr.AccessTime
+	if aTime.Before(hdr.ModTime) {
+		// Last access time should never be before last modified time.
+ aTime = hdr.ModTime + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + + var errs []string + for key, value := range hdr.PAXRecords { + xattrKey, ok := strings.CutPrefix(key, PaxSchilyXattr) + if !ok { + continue + } + if _, found := xattrsToIgnore[xattrKey]; found { + continue + } + if err := system.Lsetxattr(path, xattrKey, []byte(value), 0); err != nil { + if errors.Is(err, system.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) { + // Ignore specific error cases: + // - ENOTSUP: Expected for graphdrivers lacking extended attribute support: + // - Legacy AUFS versions + // - FreeBSD with unsupported namespaces (trusted, security) + // - EPERM: Expected when operating within a user namespace + // All other errors will cause a failure. + errs = append(errs, err.Error()) + continue + } + return err + } + } + + if forceMask != nil && (typeFlag == tar.TypeReg || typeFlag == tar.TypeDir || runtime.GOOS == "darwin") { + value := idtools.Stat{ + IDs: idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}, + Mode: hdrInfo.Mode(), + Major: int(hdr.Devmajor), + Minor: int(hdr.Devminor), + } + if err := idtools.SetContainersOverrideXattr(path, value); err != nil { + return err + } + } + + // We defer setting flags on directories until the end of + // Unpack or UnpackLayer in case setting them makes the + // directory immutable. + if hdr.Typeflag != tar.TypeDir { + if err := WriteFileFlagsFromTarHeader(path, hdr); err != nil { + return err + } + } + + if len(errs) > 0 { + logrus.WithFields(logrus.Fields{ + "errors": errs, + }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") + } + + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. +func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + tarWithOptionsTo := func(dest io.WriteCloser, srcPath string, options *TarOptions) (result error) { + // Fix the source path to work with long path names. This is a no-op + // on platforms other than Windows. 
+ srcPath = fixVolumePathPrefix(srcPath) + defer func() { + if err := dest.Close(); err != nil && result == nil { + result = err + } + }() + + pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) + if err != nil { + return err + } + + compressWriter, err := CompressStream(dest, options.Compression) + if err != nil { + return err + } + + ta := newTarWriter( + idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), + compressWriter, + options.ChownOpts, + options.Timestamp, + ) + ta.WhiteoutConverter = GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) + ta.CopyPass = options.CopyPass + + includeFiles := options.IncludeFiles + defer func() { + if err := compressWriter.Close(); err != nil && result == nil { + result = err + } + }() + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + + stat, err := os.Lstat(srcPath) + if err != nil { + return err + } + + if !stat.IsDir() { + // We can't later join a non-dir with any includes because the + // 'walk' will error if "file/." is stat-ed and "file" is not a + // directory. So, we must split the source path and use the + // basename as the include. + if len(includeFiles) > 0 { + logrus.Warn("Tar: Can't archive a file with includes") + } + + dir, base := SplitPathDirEntry(srcPath) + srcPath = dir + includeFiles = []string{base} + } + + if len(includeFiles) == 0 { + includeFiles = []string{"."} + } + + seen := make(map[string]bool) + + for _, include := range includeFiles { + rebaseName := options.RebaseNames[include] + + walkRoot := getWalkRoot(srcPath, include) + if err := filepath.WalkDir(walkRoot, func(filePath string, d fs.DirEntry, err error) error { + if err != nil { + logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) + return nil + } + + relFilePath, err := filepath.Rel(srcPath, filePath) + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && d.IsDir()) { + // Error getting relative path OR we are looking + // at the source directory path. Skip in both situations. + return nil //nolint: nilerr + } + + if options.IncludeSourceDir && include == "." && relFilePath != "." { + relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) + } + + skip := false + + // If "include" is an exact match for the current file + // then even if there's an "excludePatterns" pattern that + // matches it, don't skip it. IOW, assume an explicit 'include' + // is asking for that file no matter what - which is true + // for some files, like .dockerignore and Dockerfile (sometimes) + if include != relFilePath { + matches, err := pm.IsMatch(relFilePath) + if err != nil { + return fmt.Errorf("matching %s: %w", relFilePath, err) + } + skip = matches + } + + if skip { + // If we want to skip this file and its a directory + // then we should first check to see if there's an + // excludes pattern (e.g. !dir/file) that starts with this + // dir. If so then we can't skip this dir. + + // Its not a dir then so we can just return/skip. + if !d.IsDir() { + return nil + } + + // No exceptions (!...) 
in patterns so just skip dir + if !pm.Exclusions() { + return filepath.SkipDir + } + + dirSlash := relFilePath + string(filepath.Separator) + + for _, pat := range pm.Patterns() { + if !pat.Exclusion() { + continue + } + if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { + // found a match - so can't skip this dir + return nil + } + } + + // No matching exclusion dir so just skip dir + return filepath.SkipDir + } + + if seen[relFilePath] { + return nil + } + seen[relFilePath] = true + + // Rename the base resource. + if rebaseName != "" { + var replacement string + if rebaseName != string(filepath.Separator) { + // Special case the root directory to replace with an + // empty string instead so that we don't end up with + // double slashes in the paths. + replacement = rebaseName + } + + relFilePath = strings.Replace(relFilePath, include, replacement, 1) + } + + if err := ta.addFile(filePath, relFilePath); err != nil { + logrus.Errorf("Can't add file %s to tar: %s", filePath, err) + // if pipe is broken, stop writing tar stream to it + if err == io.ErrClosedPipe { + return err + } + } + return nil + }); err != nil { + return err + } + } + return ta.TarWriter.Close() + } + + pipeReader, pipeWriter := io.Pipe() + go func() { + err := tarWithOptionsTo(pipeWriter, srcPath, options) + if pipeErr := pipeWriter.CloseWithError(err); pipeErr != nil { + logrus.Errorf("Can't close pipe writer: %s", pipeErr) + } + }() + + return pipeReader, nil +} + +// Unpack unpacks the decompressedArchive to dest with options. +func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { + tr := tar.NewReader(decompressedArchive) + trBuf := pools.BufioReader32KPool.Get(nil) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + rootIDs := idMappings.RootPair() + whiteoutConverter := GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) + buffer := make([]byte, 1<<20) + + doChown := !options.NoLchown + if options.ForceMask != nil { + // if ForceMask is in place, make sure lchown is disabled. + doChown = false + } + var rootHdr *tar.Header + + // Iterate through the files in the archive. +loop: + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return err + } + + // Normalize name, for safety and for a simple is-root check + // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: + // This keeps "..\" as-is, but normalizes "\..\" to "\". + hdr.Name = filepath.Clean(hdr.Name) + + for _, exclude := range options.ExcludePatterns { + if strings.HasPrefix(hdr.Name, exclude) { + continue loop + } + } + + // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in + // the filepath format for the OS on which the daemon is running. Hence + // the check for a slash-suffix MUST be done in an OS-agnostic way. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + if err := fileutils.Lexists(parentPath); err != nil && os.IsNotExist(err) { + err = idtools.MkdirAllAndChownNew(parentPath, 0o777, rootIDs) + if err != nil { + return err + } + } + } + + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return err + } + if rel == "." 
{ + rootHdr = hdr + } + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + + // If path exits we almost always just want to remove and replace it + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing directory with a non-directory from the archive. + return overwriteError(fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)) + } + + if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing non-directory with a directory from the archive. + return overwriteError(fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)) + } + + if fi.IsDir() && hdr.Name == "." { + continue + } + + if !fi.IsDir() || hdr.Typeflag != tar.TypeDir { + if err := os.RemoveAll(path); err != nil { + return err + } + } + } + trBuf.Reset(tr) + + chownOpts := options.ChownOpts + if err := remapIDs(nil, idMappings, chownOpts, hdr); err != nil { + return err + } + + if whiteoutConverter != nil { + writeFile, err := whiteoutConverter.ConvertRead(hdr, path) + if err != nil { + return err + } + if !writeFile { + continue + } + } + + if chownOpts != nil { + chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} + } + + if err = extractTarFileEntry(path, dest, hdr, trBuf, doChown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil { + return err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return err + } + if err := WriteFileFlagsFromTarHeader(path, hdr); err != nil { + return err + } + } + + if options.ForceMask != nil { + value := idtools.Stat{Mode: os.ModeDir | os.FileMode(0o755)} + if rootHdr != nil { + value.IDs.UID = rootHdr.Uid + value.IDs.GID = rootHdr.Gid + value.Mode = os.ModeDir | os.FileMode(rootHdr.Mode) + } + if err := idtools.SetContainersOverrideXattr(dest, value); err != nil { + return err + } + } + + return nil +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// +// identity (uncompressed), gzip, bzip2, xz. +// +// FIXME: specify behavior when target path exists vs. doesn't exist. +func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. 
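+// A minimal usage sketch (the tar path, destination directory, and options
+// here are illustrative assumptions, not part of this package):
+//
+//	f, err := os.Open("/tmp/layer.tar")
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()
+//	return UntarUncompressed(f, "/var/lib/unpack", &TarOptions{NoLchown: true})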
+func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { + if tarArchive == nil { + return fmt.Errorf("empty archive") + } + dest = filepath.Clean(dest) + if options == nil { + options = &TarOptions{} + } + + r := tarArchive + if decompress { + decompressedArchive, err := DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return Unpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func (archiver *Archiver) TarUntar(src, dst string) error { + logrus.Debugf("TarUntar(%s %s)", src, dst) + tarMappings := archiver.TarIDMappings + if tarMappings == nil { + tarMappings = &idtools.IDMappings{} + } + options := &TarOptions{ + UIDMaps: tarMappings.UIDs(), + GIDMaps: tarMappings.GIDs(), + Compression: Uncompressed, + CopyPass: true, + InUserNS: unshare.IsRootless(), + } + archive, err := TarWithOptions(src, options) + if err != nil { + return err + } + defer archive.Close() + untarMappings := archiver.UntarIDMappings + if untarMappings == nil { + untarMappings = &idtools.IDMappings{} + } + options = &TarOptions{ + UIDMaps: untarMappings.UIDs(), + GIDMaps: untarMappings.GIDs(), + ChownOpts: archiver.ChownOpts, + InUserNS: unshare.IsRootless(), + } + return archiver.Untar(archive, dst, options) +} + +// UntarPath untar a file from path to a destination, src is the source tar file path. +func (archiver *Archiver) UntarPath(src, dst string) error { + archive, err := os.Open(src) + if err != nil { + return err + } + defer archive.Close() + untarMappings := archiver.UntarIDMappings + if untarMappings == nil { + untarMappings = &idtools.IDMappings{} + } + options := &TarOptions{ + UIDMaps: untarMappings.UIDs(), + GIDMaps: untarMappings.GIDs(), + ChownOpts: archiver.ChownOpts, + InUserNS: unshare.IsRootless(), + } + return archiver.Untar(archive, dst, options) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := os.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + + // if this archiver is set up with ID mapping we need to create + // the new destination directory with the remapped root UID/GID pair + // as owner + rootIDs := archiver.UntarIDMappings.RootPair() + if archiver.ChownOpts != nil { + rootIDs = *archiver.ChownOpts + } + // Create dst, copy src's content into it + logrus.Debugf("Creating dest directory: %s", dst) + if err := idtools.MkdirAllAndChownNew(dst, 0o755, rootIDs); err != nil { + return err + } + logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) + return archiver.TarUntar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. 
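+// A usage sketch (both paths are illustrative assumptions):
+//
+//	a := NewDefaultArchiver()
+//	if err := a.CopyFileWithTar("/etc/resolv.conf", "/mnt/rootfs/etc/resolv.conf"); err != nil {
+//		return err
+//	}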
+func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) + srcSt, err := os.Stat(src) + if err != nil { + return err + } + + if srcSt.IsDir() { + return fmt.Errorf("can't copy a directory") + } + + // Clean up the trailing slash. This must be done in an operating + // system specific manner. + if dst[len(dst)-1] == os.PathSeparator { + dst = filepath.Join(dst, filepath.Base(src)) + } + // Create the holding directory if necessary + if err := os.MkdirAll(filepath.Dir(dst), 0o700); err != nil { + return err + } + + r, w := io.Pipe() + errC := promise.Go(func() error { + defer w.Close() + + srcF, err := os.Open(src) + if err != nil { + return err + } + defer srcF.Close() + + hdr, err := tar.FileInfoHeader(srcSt, "") + if err != nil { + return err + } + hdr.Name = filepath.Base(dst) + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + copyPassHeader(hdr) + + if err := remapIDs(archiver.TarIDMappings, nil, archiver.ChownOpts, hdr); err != nil { + return err + } + + tw := tar.NewWriter(w) + defer tw.Close() + if err := tw.WriteHeader(hdr); err != nil { + return err + } + if _, err := io.Copy(tw, srcF); err != nil { + return err + } + return nil + }) + defer func() { + if er := <-errC; err == nil && er != nil { + err = er + } + }() + + options := &TarOptions{ + UIDMaps: archiver.UntarIDMappings.UIDs(), + GIDMaps: archiver.UntarIDMappings.GIDs(), + ChownOpts: archiver.ChownOpts, + InUserNS: unshare.IsRootless(), + NoOverwriteDirNonDir: true, + } + err = archiver.Untar(r, filepath.Dir(dst), options) + if err != nil { + r.CloseWithError(err) + } + return err +} + +func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, hdr *tar.Header) (err error) { + var uid, gid int + if chownOpts != nil { + uid, gid = chownOpts.UID, chownOpts.GID + } else { + if readIDMappings != nil && !readIDMappings.Empty() { + uid, gid, err = readIDMappings.ToContainer(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}) + if err != nil { + return err + } + } else if runtime.GOOS == darwin { + uid, gid = hdr.Uid, hdr.Gid + if xstat, ok := hdr.PAXRecords[PaxSchilyXattr+idtools.ContainersOverrideXattr]; ok { + attrs := strings.Split(xstat, ":") + if len(attrs) >= 3 { + val, err := strconv.ParseUint(attrs[0], 10, 32) + if err != nil { + uid = int(val) + } + val, err = strconv.ParseUint(attrs[1], 10, 32) + if err != nil { + gid = int(val) + } + } + } + } else { + uid, gid = hdr.Uid, hdr.Gid + } + } + ids := idtools.IDPair{UID: uid, GID: gid} + if writeIDMappings != nil && !writeIDMappings.Empty() { + ids, err = writeIDMappings.ToHost(ids) + if err != nil { + return err + } + } + hdr.Uid, hdr.Gid = ids.UID, ids.GID + return nil +} + +// NewTempArchive reads the content of src into a temporary file, and returns the contents +// of that file as an archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { + f, err := os.CreateTemp(dir, "") + if err != nil { + return nil, err + } + if _, err := io.Copy(f, src); err != nil { + return nil, err + } + if _, err := f.Seek(0, 0); err != nil { + return nil, err + } + st, err := f.Stat() + if err != nil { + return nil, err + } + size := st.Size() + return &TempArchive{File: f, Size: size}, nil +} + +// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. 
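+// For example (a sketch; "content" is an assumed caller-side io.Reader):
+//
+//	tmp, err := NewTempArchive(content, "") // "" selects the default temp dir
+//	if err != nil {
+//		return err
+//	}
+//	defer tmp.Close() // safe even after the read side auto-closes it
+//	// stream tmp somewhere; its backing file is removed once fully read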
+type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. +func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.Close() + os.Remove(archive.File.Name()) + } + return n, err +} + +// IsArchive checks for the magic bytes of a tar or any supported compression +// algorithm. +func IsArchive(header []byte) bool { + compression := DetectCompression(header) + if compression != Uncompressed { + return true + } + r := tar.NewReader(bytes.NewReader(header)) + _, err := r.Next() + return err == nil +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. +func UntarPath(src, dst string) error { + return NewDefaultArchiver().UntarPath(src, dst) +} + +const ( + // HeaderSize is the size in bytes of a tar header + HeaderSize = 512 +) + +// NewArchiver returns a new Archiver +func NewArchiver(idMappings *idtools.IDMappings) *Archiver { + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + return &Archiver{Untar: Untar, TarIDMappings: idMappings, UntarIDMappings: idMappings} +} + +// NewArchiverWithChown returns a new Archiver which uses Untar and the provided ID mapping configuration on both ends +func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, untarIDMappings *idtools.IDMappings) *Archiver { + if tarIDMappings == nil { + tarIDMappings = &idtools.IDMappings{} + } + if untarIDMappings == nil { + untarIDMappings = &idtools.IDMappings{} + } + return &Archiver{Untar: Untar, TarIDMappings: tarIDMappings, ChownOpts: chownOpts, UntarIDMappings: untarIDMappings} +} + +// CopyFileWithTarAndChown returns a function which copies a single file from outside +// of any container into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error { + contentReader, contentWriter, err := os.Pipe() + if err != nil { + return fmt.Errorf("creating pipe extract data to %q: %w", dest, err) + } + defer contentReader.Close() + defer contentWriter.Close() + var hashError error + var hashWorker sync.WaitGroup + hashWorker.Add(1) + go func() { + t := tar.NewReader(contentReader) + _, err := t.Next() + if err != nil { + hashError = err + } + if _, err = io.Copy(hasher, t); err != nil && err != io.EOF { + hashError = err + } + hashWorker.Done() + }() + if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil { + err = fmt.Errorf("extracting data to %q while copying: %w", dest, err) + } + hashWorker.Wait() + if err == nil && hashError != nil { + err = fmt.Errorf("calculating digest of data for %q while 
copying: %w", dest, hashError) + } + return err + } + } + return archiver.CopyFileWithTar +} + +// CopyWithTarAndChown returns a function which copies a directory tree from outside of +// any container into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func CopyWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error { + return originalUntar(io.TeeReader(tarArchive, hasher), dest, options) + } + } + return archiver.CopyWithTar +} + +// UntarPathAndChown returns a function which extracts an archive in a specified +// location into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func UntarPathAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error { + return originalUntar(io.TeeReader(tarArchive, hasher), dest, options) + } + } + return archiver.UntarPath +} + +// TarPath returns a function which creates an archive of a specified +// location in the container's filesystem, mapping permissions using the +// container's ID maps +func TarPath(uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(path string) (io.ReadCloser, error) { + tarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + return func(path string) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{ + Compression: Uncompressed, + UIDMaps: tarMappings.UIDs(), + GIDMaps: tarMappings.GIDs(), + }) + } +} + +// GetOverlayXattrName returns the xattr used by the overlay driver with the +// given name. +// It uses the trusted.overlay prefix when running as root, and user.overlay +// in rootless mode. 
+func GetOverlayXattrName(name string) string { + if unshare.IsRootless() { + return fmt.Sprintf("user.overlay.%s", name) + } + return fmt.Sprintf("trusted.overlay.%s", name) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_110.go b/vendor/github.com/containers/storage/pkg/archive/archive_110.go new file mode 100644 index 0000000000..db614cdd6b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_110.go @@ -0,0 +1,22 @@ +//go:build go1.10 + +package archive + +import ( + "archive/tar" + "time" +) + +func copyPassHeader(hdr *tar.Header) { + hdr.Format = tar.FormatPAX +} + +func maybeTruncateHeaderModTime(hdr *tar.Header) { + if hdr.Format == tar.FormatUnknown { + // one of the first things archive/tar does is round this + // value, possibly up, if the format isn't specified, while we + // are much better equipped to handle truncation when scanning + // for changes between source and an extracted copy of this + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + } +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_19.go b/vendor/github.com/containers/storage/pkg/archive/archive_19.go new file mode 100644 index 0000000000..304464fe78 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_19.go @@ -0,0 +1,13 @@ +//go:build !go1.10 + +package archive + +import ( + "archive/tar" +) + +func copyPassHeader(hdr *tar.Header) { +} + +func maybeTruncateHeaderModTime(hdr *tar.Header) { +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go b/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go new file mode 100644 index 0000000000..74e62331a6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go @@ -0,0 +1,18 @@ +//go:build netbsd || freebsd || darwin + +package archive + +import ( + "archive/tar" + "os" + + "golang.org/x/sys/unix" +) + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error { + permissionsMask := hdrInfo.Mode() + if forceMask != nil { + permissionsMask = *forceMask + } + return unix.Fchmodat(unix.AT_FDCWD, path, uint32(permissionsMask), unix.AT_SYMLINK_NOFOLLOW) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go new file mode 100644 index 0000000000..b3245f7fd9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go @@ -0,0 +1,208 @@ +package archive + +import ( + "archive/tar" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + "golang.org/x/sys/unix" +) + +func getOverlayOpaqueXattrName() string { + return GetOverlayXattrName("opaque") +} + +func GetWhiteoutConverter(format WhiteoutFormat, data any) TarWhiteoutConverter { + if format == OverlayWhiteoutFormat { + if rolayers, ok := data.([]string); ok && len(rolayers) > 0 { + return overlayWhiteoutConverter{rolayers: rolayers} + } + return overlayWhiteoutConverter{rolayers: nil} + } + return nil +} + +type overlayWhiteoutConverter struct { + rolayers []string +} + +func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { + // convert whiteouts to AUFS format + if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { + // we just rename the file and make it normal + dir, filename := filepath.Split(hdr.Name) + hdr.Name = 
filepath.Join(dir, WhiteoutPrefix+filename) + hdr.Mode = 0 + hdr.Typeflag = tar.TypeReg + hdr.Size = 0 + } + + if fi.Mode()&os.ModeDir != 0 { + // convert opaque dirs to AUFS format by writing an empty file with the whiteout prefix + opaque, err := system.Lgetxattr(path, getOverlayOpaqueXattrName()) + if err != nil { + return nil, err + } + if len(opaque) == 1 && opaque[0] == 'y' { + if hdr.PAXRecords != nil { + delete(hdr.PAXRecords, PaxSchilyXattr+getOverlayOpaqueXattrName()) + } + // If there are no lower layers, then it can't have been deleted in this layer. + if len(o.rolayers) == 0 { + return nil, nil //nolint: nilnil + } + // At this point, we have a directory that's opaque. If it appears in one of the lower + // layers, then it was newly-created here, so it wasn't also deleted here. + for _, rolayer := range o.rolayers { + stat, statErr := os.Stat(filepath.Join(rolayer, hdr.Name)) + if statErr != nil && !os.IsNotExist(statErr) && !isENOTDIR(statErr) { + // Not sure what happened here. + return nil, statErr + } + if statErr == nil { + if stat.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(stat) { + return nil, nil //nolint: nilnil + } + } + // It's not whiteout, so it was there in the older layer, so we need to + // add a whiteout for this item in this layer. + // create a header for the whiteout file + // it should inherit some properties from the parent, but be a regular file + wo = &tar.Header{ + Typeflag: tar.TypeReg, + Mode: hdr.Mode & int64(os.ModePerm), + Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), + Size: 0, + Uid: hdr.Uid, + Uname: hdr.Uname, + Gid: hdr.Gid, + Gname: hdr.Gname, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + } + break + } + for dir := filepath.Dir(hdr.Name); dir != "" && dir != "." && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { + // Check for whiteout for a parent directory in a parent layer. + stat, statErr := os.Stat(filepath.Join(rolayer, dir)) + if statErr != nil && !os.IsNotExist(statErr) && !isENOTDIR(statErr) { + // Not sure what happened here. + return nil, statErr + } + if statErr == nil { + if stat.Mode()&os.ModeCharDevice != 0 { + // If it's whiteout for a parent directory, then the + // original directory wasn't inherited into this layer, + // so we don't need to emit whiteout for it. + if isWhiteOut(stat) { + return nil, nil //nolint: nilnil + } + } + } + } + } + } + } + + return +} + +func (overlayWhiteoutConverter) ConvertReadWithHandler(hdr *tar.Header, path string, handler TarWhiteoutHandler) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + + // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay + if base == WhiteoutOpaqueDir { + err := handler.Setxattr(dir, getOverlayOpaqueXattrName(), []byte{'y'}) + // don't write the file itself + return false, err + } + + // if a file was deleted and we are using overlay, we need to create a character device + if originalBase, ok := strings.CutPrefix(base, WhiteoutPrefix); ok { + originalPath := filepath.Join(dir, originalBase) + + if err := handler.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { + // If someone does: + // rm -rf /foo/bar + // in an image, some tools will generate a layer with: + // /.wh.foo + // /foo/.wh.bar + // and when doing the second mknod(), we will fail with + // ENOTDIR, since the previous /foo was mknod()'d as a + // character device node and not a directory. 
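+			// Illustrated (a sketch of such a layer's contents):
+			//
+			//	.wh.foo        -> mknod()s /foo as a 0/0 char device
+			//	foo/.wh.bar    -> mknod(/foo/bar) then fails with ENOTDIR,
+			//	                  since /foo is now a device node, not a dir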
+ if isENOTDIR(err) { + return false, nil + } + return false, err + } + if err := handler.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { + return false, err + } + + // don't write the file itself + return false, nil + } + + return true, nil +} + +type directHandler struct{} + +func (d directHandler) Setxattr(path, name string, value []byte) error { + return unix.Setxattr(path, name, value, 0) +} + +func (d directHandler) Mknod(path string, mode uint32, dev int) error { + return unix.Mknod(path, mode, dev) +} + +func (d directHandler) Chown(path string, uid, gid int) error { + return idtools.SafeChown(path, uid, gid) +} + +func (o overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { + var handler directHandler + return o.ConvertReadWithHandler(hdr, path, handler) +} + +func isWhiteOut(stat os.FileInfo) bool { + s := stat.Sys().(*syscall.Stat_t) + return major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 //nolint:unconvert +} + +func GetFileOwner(path string) (uint32, uint32, uint32, error) { + f, err := os.Stat(path) + if err != nil { + return 0, 0, 0, err + } + s, ok := f.Sys().(*syscall.Stat_t) + if ok { + return s.Uid, s.Gid, s.Mode & 0o7777, nil + } + return 0, 0, uint32(f.Mode()), nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error { + permissionsMask := hdrInfo.Mode() + if forceMask != nil { + permissionsMask = *forceMask + } + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, permissionsMask); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, permissionsMask); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_other.go b/vendor/github.com/containers/storage/pkg/archive/archive_other.go new file mode 100644 index 0000000000..b342ff75ee --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_other.go @@ -0,0 +1,11 @@ +//go:build !linux + +package archive + +func GetWhiteoutConverter(format WhiteoutFormat, data interface{}) TarWhiteoutConverter { + return nil +} + +func GetFileOwner(path string) (uint32, uint32, uint32, error) { + return 0, 0, 0, nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go new file mode 100644 index 0000000000..3a02a88c14 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go @@ -0,0 +1,136 @@ +//go:build !windows + +package archive + +import ( + "archive/tar" + "errors" + "os" + "path/filepath" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + "golang.org/x/sys/unix" +) + +func init() { + sysStatOverride = statUnix +} + +// statUnix populates hdr from system-dependent fields of fi without performing +// any OS lookups. +// Adapted from Moby. 
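+// For a block or character device, the major/minor numbers are recorded too;
+// on Linux, for instance, an Rdev of 0x0801 yields Devmajor=8, Devminor=1
+// (i.e. /dev/sda1).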
+func statUnix(fi os.FileInfo, hdr *tar.Header) error { + s, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return nil + } + + hdr.Uid = int(s.Uid) + hdr.Gid = int(s.Gid) + + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert + hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert + } + + return nil +} + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return srcPath +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. On Linux, we +// can't use filepath.Join(srcPath,include) because this will clean away +// a trailing "." or "/" which may be important. +func getWalkRoot(srcPath string, include string) string { + return srcPath + string(filepath.Separator) + include +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + return p, nil // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. + +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat any) (err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + // Currently go does not fill in the major/minors + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(major(uint64(s.Rdev))) //nolint: unconvert + hdr.Devminor = int64(minor(uint64(s.Rdev))) //nolint: unconvert + } + } + + return +} + +func getInodeFromStat(stat any) (inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + inode = s.Ino + } + + return +} + +func getFileUIDGID(stat any) (idtools.IDPair, error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t") + } + return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + mode := uint32(hdr.Mode & 0o7777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= unix.S_IFBLK + case tar.TypeChar: + mode |= unix.S_IFCHR + case tar.TypeFifo: + mode |= unix.S_IFIFO + } + + return system.Mknod(path, mode, system.Mkdev(hdr.Devmajor, hdr.Devminor)) +} + +// Hardlink without symlinks +func handleLLink(targetPath, path string) error { + // Note: on Linux, the link syscall will not follow symlinks. + // This behavior is implementation-dependent since + // POSIX.1-2008 so to make it clear that we need non-symlink + // following here we use the linkat syscall which has a flags + // field to select symlink following or not. 
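+	// Passing 0 for the flags (i.e. omitting AT_SYMLINK_FOLLOW) makes the
+	// new hard link refer to the symlink itself rather than to its target.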
+ return unix.Linkat(unix.AT_FDCWD, targetPath, unix.AT_FDCWD, path, 0) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go new file mode 100644 index 0000000000..6db31cf4cf --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go @@ -0,0 +1,83 @@ +//go:build windows + +package archive + +import ( + "archive/tar" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/longpath" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return longpath.AddPrefix(srcPath) +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. +func getWalkRoot(srcPath string, include string) string { + return filepath.Join(srcPath, include) +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + // windows: convert windows style relative path with backslashes + // into forward slashes. Since windows does not allow '/' or '\' + // in file names, it is mostly safe to replace however we must + // check just in case + if strings.Contains(p, "/") { + return "", fmt.Errorf("windows path contains forward slash: %s", p) + } + return strings.Replace(p, string(os.PathSeparator), "/", -1), nil +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + // perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) + permPart := perm & os.ModePerm + noPermPart := perm &^ os.ModePerm + // Add the x bit: make everything +x from windows + permPart |= 0o111 + permPart &= 0o755 + + return noPermPart | permPart +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + // do nothing. no notion of Rdev, Nlink in stat on Windows + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + // do nothing. 
no notion of Inode in stat on Windows + return +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error { + return nil +} + +func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { + // no notion of file ownership mapping yet on Windows + return idtools.IDPair{0, 0}, nil +} + +// Hardlink without following symlinks +func handleLLink(targetPath string, path string) error { + return os.Link(targetPath, path) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_zstd.go b/vendor/github.com/containers/storage/pkg/archive/archive_zstd.go new file mode 100644 index 0000000000..36b7118aa8 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_zstd.go @@ -0,0 +1,41 @@ +package archive + +import ( + "io" + + "github.com/klauspost/compress/zstd" +) + +type wrapperZstdDecoder struct { + decoder *zstd.Decoder +} + +func (w *wrapperZstdDecoder) Close() error { + w.decoder.Close() + return nil +} + +func (w *wrapperZstdDecoder) DecodeAll(input, dst []byte) ([]byte, error) { + return w.decoder.DecodeAll(input, dst) +} + +func (w *wrapperZstdDecoder) Read(p []byte) (int, error) { + return w.decoder.Read(p) +} + +func (w *wrapperZstdDecoder) Reset(r io.Reader) error { + return w.decoder.Reset(r) +} + +func (w *wrapperZstdDecoder) WriteTo(wr io.Writer) (int64, error) { + return w.decoder.WriteTo(wr) +} + +func zstdReader(buf io.Reader) (io.ReadCloser, error) { + decoder, err := zstd.NewReader(buf) + return &wrapperZstdDecoder{decoder: decoder}, err +} + +func zstdWriter(dest io.Writer) (io.WriteCloser, error) { + return zstd.NewWriter(dest) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go new file mode 100644 index 0000000000..a5847cca54 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/changes.go @@ -0,0 +1,499 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "maps" + "os" + "path/filepath" + "reflect" + "sort" + "strings" + "syscall" + "time" + + "github.com/containers/storage/pkg/fileutils" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/pools" + "github.com/containers/storage/pkg/system" + "github.com/sirupsen/logrus" +) + +// ChangeType represents the change type. +type ChangeType int + +const ( + // ChangeModify represents the modify operation. + ChangeModify = iota + // ChangeAdd represents the add operation. + ChangeAdd + // ChangeDelete represents the delete operation. + ChangeDelete +) + +func (c ChangeType) String() string { + switch c { + case ChangeModify: + return "C" + case ChangeAdd: + return "A" + case ChangeDelete: + return "D" + } + return "" +} + +// Change represents a change, it wraps the change type and path. +// It describes changes of the files in the path respect to the +// parent layers. The change could be modify, add, delete. +// This is used for layer diff. +type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + return fmt.Sprintf("%s %s", change.Kind, change.Path) +} + +// changesByPath implements sort.Interface. 
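+// It lets a computed diff be ordered deterministically, e.g.
+//
+//	sort.Sort(changesByPath(changes))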
+type changesByPath []Change + +func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } +func (c changesByPath) Len() int { return len(c) } +func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } + +// Gnu tar and the go tar writer don't have sub-second mtime +// precision, which is problematic when we apply changes via tar +// files, we handle this by comparing for exact times, *or* same +// second count and either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a.Equal(b) || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} + +func sameFsTimeSpec(a, b syscall.Timespec) bool { + return a.Sec == b.Sec && + (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) +} + +// Changes walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func Changes(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip, aufsWhiteoutPresent) +} + +func aufsMetadataSkip(path string) (skip bool, err error) { + skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) + if err != nil { + skip = true + } + return +} + +func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { + f := filepath.Base(path) + + // If there is a whiteout, then the file was removed + if originalFile, ok := strings.CutPrefix(f, WhiteoutPrefix); ok { + return filepath.Join(filepath.Dir(path), originalFile), nil + } + + return "", nil +} + +func aufsWhiteoutPresent(root, path string) (bool, error) { + f := filepath.Join(filepath.Dir(path), WhiteoutPrefix+filepath.Base(path)) + err := fileutils.Exists(filepath.Join(root, f)) + if err == nil { + return true, nil + } + if os.IsNotExist(err) || isENOTDIR(err) { + return false, nil + } + return false, err +} + +func isENOTDIR(err error) bool { + if err == nil { + return false + } + if err == syscall.ENOTDIR { + return true + } + if perror, ok := err.(*os.PathError); ok { + if errno, ok := perror.Err.(syscall.Errno); ok { + return errno == syscall.ENOTDIR + } + } + return false +} + +type ( + skipChange func(string) (bool, error) + deleteChange func(string, string, os.FileInfo) (string, error) + whiteoutChange func(string, string) (bool, error) +) + +func changes(layers []string, rw string, dc deleteChange, sc skipChange, wc whiteoutChange) ([]Change, error) { + var ( + changes []Change + changedDirs = make(map[string]struct{}) + ) + + err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(rw, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. 
+ path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + if sc != nil { + if skip, err := sc(path); skip { + return err + } + } + + change := Change{ + Path: path, + } + + deletedFile, err := dc(rw, path, f) + if err != nil { + return err + } + + // Find out what kind of modification happened + if deletedFile != "" { + change.Path = deletedFile + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + layerScan: + for _, layer := range layers { + if wc != nil { + // ...Unless a lower layer also had whiteout for this directory or one of its parents, + // in which case, it's new + ignore, err := wc(layer, path) + if err != nil { + return err + } + if ignore { + break layerScan + } + for dir := filepath.Dir(path); dir != "" && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { + ignore, err = wc(layer, dir) + if err != nil { + return err + } + if ignore { + break layerScan + } + } + } + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. + // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. + if f.IsDir() { + changedDirs[path] = struct{}{} + } + if change.Kind == ChangeAdd || change.Kind == ChangeDelete { + parent := filepath.Dir(path) + tail := []Change{} + for parent != "/" { + if _, ok := changedDirs[parent]; !ok && parent != "/" { + tail = append([]Change{{Path: parent, Kind: ChangeModify}}, tail...) + changedDirs[parent] = struct{}{} + } + parent = filepath.Dir(parent) + } + changes = append(changes, tail...) + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +// FileInfo describes the information of a file. +type FileInfo struct { + parent *FileInfo + idMappings *idtools.IDMappings + name string + stat *system.StatT + children map[string]*FileInfo + capability []byte + added bool + xattrs map[string]string + target string +} + +// LookUp looks up the file information of a file. +func (info *FileInfo) LookUp(path string) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. 
+ parent := info + if path == string(os.PathSeparator) { + return info + } + + pathElements := strings.Split(path, string(os.PathSeparator)) + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + // As this runs on the daemon side, file paths are OS specific. + return string(os.PathSeparator) + } + return filepath.Join(info.parent.path(), info.name) +} + +func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { + sizeAtEntry := len(*changes) + + if oldInfo == nil { + // add + change := Change{ + Path: info.path(), + Kind: ChangeAdd, + } + *changes = append(*changes, change) + info.added = true + } + + // We make a copy so we can modify it to detect additions + // also, we only recurse on the old dir if the new info is a directory + // otherwise any previous delete/change is considered recursive + oldChildren := make(map[string]*FileInfo) + if oldInfo != nil && info.isDir() { + maps.Copy(oldChildren, oldInfo.children) + } + + for name, newChild := range info.children { + oldChild := oldChildren[name] + if oldChild != nil { + // change? + oldStat := oldChild.stat + newStat := newChild.stat + // Note: We can't compare inode or ctime or blocksize here, because these change + // when copying a file into a container. However, that is not generally a problem + // because any content change will change mtime, and any status change should + // be visible when actually comparing the stat fields. The only time this + // breaks down is if some code intentionally hides a change by setting + // back mtime + if statDifferent(oldStat, oldInfo, newStat, info) || + !bytes.Equal(oldChild.capability, newChild.capability) || + oldChild.target != newChild.target || + !reflect.DeepEqual(oldChild.xattrs, newChild.xattrs) { + change := Change{ + Path: newChild.path(), + Kind: ChangeModify, + } + *changes = append(*changes, change) + newChild.added = true + } + + // Remove from copy so we can detect deletions + delete(oldChildren, name) + } + + newChild.addChanges(oldChild, changes) + } + for _, oldChild := range oldChildren { + // delete + change := Change{ + Path: oldChild.path(), + Kind: ChangeDelete, + } + *changes = append(*changes, change) + } + + // If there were changes inside this directory, we need to add it, even if the directory + // itself wasn't changed. This is needed to properly save and restore filesystem permissions. + // As this runs on the daemon side, file paths are OS specific. + if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { + change := Change{ + Path: info.path(), + Kind: ChangeModify, + } + // Let's insert the directory entry before the recently added entries located inside this dir + *changes = append(*changes, change) // just to resize the slice, will be overwritten + copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) + (*changes)[sizeAtEntry] = change + } +} + +// Changes add changes to file information. +func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { + var changes []Change + + info.addChanges(oldInfo, &changes) + + return changes +} + +func newRootFileInfo(idMappings *idtools.IDMappings) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. 
+ root := &FileInfo{ + name: string(os.PathSeparator), + idMappings: idMappings, + children: make(map[string]*FileInfo), + target: "", + } + return root +} + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// If oldDir is "", then all files in newDir will be Add-Changes. +func ChangesDirs(newDir string, newMappings *idtools.IDMappings, oldDir string, oldMappings *idtools.IDMappings) ([]Change, error) { + var oldRoot, newRoot *FileInfo + if oldDir == "" { + emptyDir, err := os.MkdirTemp("", "empty") + if err != nil { + return nil, err + } + defer os.Remove(emptyDir) + oldDir = emptyDir + } + oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir, oldMappings, newMappings) + if err != nil { + return nil, err + } + + return newRoot.Changes(oldRoot), nil +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. +func ChangesSize(newDir string, changes []Change) int64 { + var ( + size int64 + sf = make(map[uint64]struct{}) + ) + for _, change := range changes { + if change.Kind == ChangeModify || change.Kind == ChangeAdd { + file := filepath.Join(newDir, change.Path) + fileInfo, err := os.Lstat(file) + if err != nil { + logrus.Errorf("Can not stat %q: %s", file, err) + continue + } + + if fileInfo != nil && !fileInfo.IsDir() { + if hasHardlinks(fileInfo) { + inode := getIno(fileInfo) + if _, ok := sf[inode]; !ok { + size += fileInfo.Size() + sf[inode] = struct{}{} + } + } else { + size += fileInfo.Size() + } + } + } + } + return size +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. +func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { + reader, writer := io.Pipe() + go func() { + ta := newTarWriter(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil, nil) + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + sort.Sort(changesByPath(changes)) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + for _, change := range changes { + if change.Kind == ChangeDelete { + whiteOutDir := filepath.Dir(change.Path) + whiteOutBase := filepath.Base(change.Path) + whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) + timestamp := time.Now() + hdr := &tar.Header{ + Name: whiteOut[1:], + Size: 0, + ModTime: timestamp, + AccessTime: timestamp, + ChangeTime: timestamp, + } + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + logrus.Debugf("Can't write whiteout header: %s", err) + } + } else { + path := filepath.Join(dir, change.Path) + if err := ta.addFile(path, change.Path[1:]); err != nil { + logrus.Debugf("Can't add file %s to tar: %s", path, err) + } + } + } + + // Make sure to check the error on Close. 
+ if err := ta.TarWriter.Close(); err != nil { + logrus.Debugf("Can't close layer: %s", err) + } + if err := writer.Close(); err != nil { + logrus.Debugf("failed close Changes writer: %s", err) + } + }() + return reader, nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go new file mode 100644 index 0000000000..95284bb81b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go @@ -0,0 +1,404 @@ +package archive + +import ( + "bytes" + "errors" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + "syscall" + "unsafe" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// walker is used to implement collectFileInfoForChanges on linux. Where this +// method in general returns the entire contents of two directory trees, we +// optimize some FS calls out on linux. In particular, we take advantage of the +// fact that getdents(2) returns the inode of each file in the directory being +// walked, which, when walking two trees in parallel to generate a list of +// changes, can be used to prune subtrees without ever having to lstat(2) them +// directly. Eliminating stat calls in this way can save up to seconds on large +// images. +type walker struct { + dir1 string + dir2 string + root1 *FileInfo + root2 *FileInfo + idmap1 *idtools.IDMappings //nolint:unused + idmap2 *idtools.IDMappings //nolint:unused +} + +// collectFileInfoForChanges returns a complete representation of the trees +// rooted at dir1 and dir2, with one important exception: any subtree or +// leaf where the inode and device numbers are an exact match between dir1 +// and dir2 will be pruned from the results. This method is *only* to be used +// to generating a list of changes between the two directories, as it does not +// reflect the full contents. +func collectFileInfoForChanges(dir1, dir2 string, idmap1, idmap2 *idtools.IDMappings) (*FileInfo, *FileInfo, error) { + w := &walker{ + dir1: dir1, + dir2: dir2, + root1: newRootFileInfo(idmap1), + root2: newRootFileInfo(idmap2), + } + + i1, err := os.Lstat(w.dir1) + if err != nil { + return nil, nil, err + } + i2, err := os.Lstat(w.dir2) + if err != nil { + return nil, nil, err + } + + if err := w.walk("/", i1, i2); err != nil { + return nil, nil, err + } + + return w.root1, w.root2, nil +} + +// Given a FileInfo, its path info, and a reference to the root of the tree +// being constructed, register this file with the tree. 
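+// It is called from (*walker).walk once for each of the two trees, e.g.
+// walkchunk(path, i1, w.dir1, w.root1).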
+func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { + if fi == nil { + return nil + } + parent := root.LookUp(filepath.Dir(path)) + if parent == nil { + return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path) + } + info := &FileInfo{ + name: filepath.Base(path), + children: make(map[string]*FileInfo), + parent: parent, + idMappings: root.idMappings, + target: "", + } + cpath := filepath.Join(dir, path) + stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) + if err != nil { + return err + } + info.stat = stat + info.capability, err = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access + if err != nil && !errors.Is(err, system.ENOTSUP) { + return err + } + xattrs, err := system.Llistxattr(cpath) + if err != nil && !errors.Is(err, system.ENOTSUP) { + return err + } + for _, key := range xattrs { + if strings.HasPrefix(key, "user.") { + value, err := system.Lgetxattr(cpath, key) + if err != nil { + if errors.Is(err, system.E2BIG) { + logrus.Errorf("archive: Skipping xattr for file %s since value is too big: %s", cpath, key) + continue + } + return err + } + if info.xattrs == nil { + info.xattrs = make(map[string]string) + } + info.xattrs[key] = string(value) + } + } + if fi.Mode()&os.ModeSymlink != 0 { + info.target, err = os.Readlink(cpath) + if err != nil { + return err + } + } + parent.children[info.name] = info + return nil +} + +// Walk a subtree rooted at the same path in both trees being iterated. For +// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d +func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { + // Register these nodes with the return trees, unless we're still at the + // (already-created) roots: + if path != "/" { + if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { + return err + } + if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { + return err + } + } + + is1Dir := i1 != nil && i1.IsDir() + is2Dir := i2 != nil && i2.IsDir() + + sameDevice := false + if i1 != nil && i2 != nil { + si1 := i1.Sys().(*syscall.Stat_t) + si2 := i2.Sys().(*syscall.Stat_t) + if si1.Dev == si2.Dev { + sameDevice = true + } + } + + // If these files are both non-existent, or leaves (non-dirs), we are done. + if !is1Dir && !is2Dir { + return nil + } + + // Fetch the names of all the files contained in both directories being walked: + var names1, names2 []nameIno + if is1Dir { + names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access + if err != nil { + return err + } + } + if is2Dir { + names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access + if err != nil { + return err + } + } + + // We have lists of the files contained in both parallel directories, sorted + // in the same order. Walk them in parallel, generating a unique merged list + // of all items present in either or both directories. 
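+	// For example (a sketch): names1 = [a, c] and names2 = [b, c] merge to
+	// [a, b, c], with "c" kept only when its inode differs between the two
+	// trees (or the trees are on different devices).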
+ var names []string + ix1 := 0 + ix2 := 0 + + for ix1 < len(names1) && ix2 < len(names2) { + ni1 := names1[ix1] + ni2 := names2[ix2] + + switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { + case -1: // ni1 < ni2 -- advance ni1 + // we will not encounter ni1 in names2 + names = append(names, ni1.name) + ix1++ + case 0: // ni1 == ni2 + if ni1.ino != ni2.ino || !sameDevice { + names = append(names, ni1.name) + } + ix1++ + ix2++ + case 1: // ni1 > ni2 -- advance ni2 + // we will not encounter ni2 in names1 + names = append(names, ni2.name) + ix2++ + } + } + for ix1 < len(names1) { + names = append(names, names1[ix1].name) + ix1++ + } + for ix2 < len(names2) { + names = append(names, names2[ix2].name) + ix2++ + } + + // For each of the names present in either or both of the directories being + // iterated, stat the name under each root, and recurse the pair of them: + for _, name := range names { + fname := filepath.Join(path, name) + var cInfo1, cInfo2 os.FileInfo + if is1Dir { + cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if is2Dir { + cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if err = w.walk(fname, cInfo1, cInfo2); err != nil { + return err + } + } + return nil +} + +// {name,inode} pairs used to support the early-pruning logic of the walker type +type nameIno struct { + name string + ino uint64 +} + +type nameInoSlice []nameIno + +func (s nameInoSlice) Len() int { return len(s) } +func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } + +// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode +// numbers further up the stack when reading directory contents. Unlike +// os.Readdirnames, which returns a list of filenames, this function returns a +// list of {filename,inode} pairs. +func readdirnames(dirname string) (names []nameIno, err error) { + var ( + size = 100 + buf = make([]byte, 4096) + nbuf int + bufp int + nb int + ) + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + names = make([]nameIno, 0, size) // Empty with room to grow. + for { + // Refill the buffer if necessary + if bufp >= nbuf { + bufp = 0 + nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux + if nbuf < 0 { + nbuf = 0 + } + if err != nil { + return nil, os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + nb, names = parseDirent(buf[bufp:nbuf], names) + bufp += nb + } + + sl := nameInoSlice(names) + sort.Sort(sl) + return sl, nil +} + +// parseDirent is a minor modification of unix.ParseDirent (linux version) +// which returns {name,inode} pairs instead of just names. +func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { + origlen := len(buf) + for len(buf) > 0 { + dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + builder := make([]byte, 0, dirent.Reclen) + for i := range len(dirent.Name) { + if dirent.Name[i] == 0 { + break + } + builder = append(builder, byte(dirent.Name[i])) + } + name := string(builder) + if name == "." || name == ".." 
{ // Useless names + continue + } + names = append(names, nameIno{name, dirent.Ino}) + } + return origlen - len(buf), names +} + +// OverlayChanges walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func OverlayChanges(layers []string, rw string) ([]Change, error) { + dc := func(root, path string, fi os.FileInfo) (string, error) { + r, err := overlayDeletedFile(layers, root, path, fi) + if err != nil { + return "", fmt.Errorf("overlay deleted file query: %w", err) + } + return r, nil + } + return changes(layers, rw, dc, nil, overlayLowerContainsWhiteout) +} + +func overlayLowerContainsWhiteout(root, path string) (bool, error) { + // Whiteout for a file or directory has the same name, but is for a character + // device with major/minor of 0/0. + stat, err := os.Stat(filepath.Join(root, path)) + if err != nil && !os.IsNotExist(err) && !isENOTDIR(err) { + // Not sure what happened here. + return false, err + } + if err == nil && stat.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(stat) { + return true, nil + } + } + return false, nil +} + +func overlayDeletedFile(layers []string, root, path string, fi os.FileInfo) (string, error) { + // If it's a whiteout item, then a file or directory with that name is removed by this layer. + if fi.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(fi) { + return path, nil + } + } + // After this we only need to pay attention to directories. + if !fi.IsDir() { + return "", nil + } + // If the directory isn't marked as opaque, then it's just a normal directory. + opaque, err := system.Lgetxattr(filepath.Join(root, path), getOverlayOpaqueXattrName()) + if err != nil { + return "", fmt.Errorf("failed querying overlay opaque xattr: %w", err) + } + if len(opaque) != 1 || opaque[0] != 'y' { + return "", err + } + // If there are no lower layers, then it can't have been deleted and recreated in this layer. + if len(layers) == 0 { + return "", err + } + // At this point, we have a directory that's opaque. If it appears in one of the lower + // layers, then it was newly-created here, so it wasn't also deleted here. + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) && !isENOTDIR(err) { + // Not sure what happened here. + return "", err + } + if err == nil { + if stat.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(stat) { + return "", nil + } + } + // It's not whiteout, so it was there in the older layer, so it has to be + // marked as deleted in this layer. + return path, nil + } + for dir := filepath.Dir(path); dir != "" && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { + // Check for whiteout for a parent directory. + stat, err := os.Stat(filepath.Join(layer, dir)) + if err != nil && !os.IsNotExist(err) && !isENOTDIR(err) { + // Not sure what happened here. + return "", err + } + if err == nil { + if stat.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(stat) { + return "", nil + } + } + } + } + } + + // We didn't find the same path in any older layers, so it was new in this one. 
+ return "", nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_other.go b/vendor/github.com/containers/storage/pkg/archive/changes_other.go new file mode 100644 index 0000000000..2965ccc9fc --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/changes_other.go @@ -0,0 +1,112 @@ +//go:build !linux + +package archive + +import ( + "fmt" + "io/fs" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" +) + +func collectFileInfoForChanges(oldDir, newDir string, oldIDMap, newIDMap *idtools.IDMappings) (*FileInfo, *FileInfo, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir, oldIDMap) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir, newIDMap) + errs <- err2 + }() + + // block until both routines have returned + for range 2 { + if err := <-errs; err != nil { + return nil, nil, err + } + } + + return oldRoot, newRoot, nil +} + +func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInfo, error) { + root := newRootFileInfo(idMappings) + + sourceStat, err := system.Lstat(sourceDir) + if err != nil { + return nil, err + } + + err = filepath.WalkDir(sourceDir, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + relPath = filepath.Join(string(os.PathSeparator), relPath) + + // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. + // Temporary workaround. If the returned path starts with two backslashes, + // trim it down to a single backslash. Only relevant on Windows. + if runtime.GOOS == "windows" { + if strings.HasPrefix(relPath, `\\`) { + relPath = relPath[1:] + } + } + + if relPath == string(os.PathSeparator) { + return nil + } + + parent := root.LookUp(filepath.Dir(relPath)) + if parent == nil { + return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) + } + + info := &FileInfo{ + name: filepath.Base(relPath), + children: make(map[string]*FileInfo), + parent: parent, + idMappings: idMappings, + } + + s, err := system.Lstat(path) + if err != nil { + return err + } + + // Don't cross mount points. This ignores file mounts to avoid + // generating a diff which deletes all files following the + // mount. 
+ if s.Dev() != sourceStat.Dev() && s.IsDir() { + return filepath.SkipDir + } + + info.stat = s + info.capability, _ = system.Lgetxattr(path, "security.capability") + + parent.children[info.name] = info + + return nil + }) + if err != nil { + return nil, err + } + return root, nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_unix.go b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go new file mode 100644 index 0000000000..fb2cb70c26 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go @@ -0,0 +1,51 @@ +//go:build !windows + +package archive + +import ( + "os" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + "golang.org/x/sys/unix" +) + +func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.StatT, newInfo *FileInfo) bool { + // Don't look at size for dirs, its not a good measure of change + oldUID, oldGID := oldStat.UID(), oldStat.GID() + uid, gid := newStat.UID(), newStat.GID() + if cuid, cgid, err := newInfo.idMappings.ToContainer(idtools.IDPair{UID: int(uid), GID: int(gid)}); err == nil { + uid = uint32(cuid) + gid = uint32(cgid) + if oldInfo != nil { + if oldcuid, oldcgid, err := oldInfo.idMappings.ToContainer(idtools.IDPair{UID: int(oldUID), GID: int(oldGID)}); err == nil { + oldUID = uint32(oldcuid) + oldGID = uint32(oldcgid) + } + } + } + ownerChanged := uid != oldUID || gid != oldGID + if oldStat.Mode() != newStat.Mode() || + ownerChanged || + oldStat.Rdev() != newStat.Rdev() || + oldStat.Flags() != newStat.Flags() || + !sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || + // Don't look at size for dirs, its not a good measure of change + ((oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR) && (oldStat.Size() != newStat.Size())) { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0 +} + +func getIno(fi os.FileInfo) uint64 { + return fi.Sys().(*syscall.Stat_t).Ino +} + +func hasHardlinks(fi os.FileInfo) bool { + return fi.Sys().(*syscall.Stat_t).Nlink > 1 +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_windows.go b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go new file mode 100644 index 0000000000..1bab94fa59 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go @@ -0,0 +1,29 @@ +package archive + +import ( + "os" + + "github.com/containers/storage/pkg/system" +) + +func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.StatT, newInfo *FileInfo) bool { + // Don't look at size for dirs, its not a good measure of change + if oldStat.Mtim() != newStat.Mtim() || + oldStat.Mode() != newStat.Mode() || + oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode().IsDir() +} + +func getIno(fi os.FileInfo) (inode uint64) { + return +} + +func hasHardlinks(fi os.FileInfo) bool { + return false +} diff --git a/vendor/github.com/containers/storage/pkg/archive/copy.go b/vendor/github.com/containers/storage/pkg/archive/copy.go new file mode 100644 index 0000000000..4d46167d70 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/copy.go @@ -0,0 +1,459 @@ +package archive + +import ( + "archive/tar" + "errors" + "io" + "os" + "path/filepath" + "strings" + + "github.com/containers/storage/pkg/fileutils" + 
"github.com/sirupsen/logrus" +) + +// Errors used or returned by this file. +var ( + ErrNotDirectory = errors.New("not a directory") + ErrDirNotExists = errors.New("no such directory") + ErrCannotCopyDir = errors.New("cannot copy directory") + ErrInvalidCopySource = errors.New("invalid copy source content") +) + +// PreserveTrailingDotOrSeparator returns the given cleaned path (after +// processing using any utility functions from the path or filepath stdlib +// packages) and appends a trailing `/.` or `/` if its corresponding original +// path (from before being processed by utility functions from the path or +// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned +// path already ends in a `.` path segment, then another is not added. If the +// clean path already ends in a path separator, then another is not added. +func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { + // Ensure paths are in platform semantics + cleanedPath = normalizePath(cleanedPath) + originalPath = normalizePath(originalPath) + + if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { + if !hasTrailingPathSeparator(cleanedPath) { + // Add a separator if it doesn't already end with one (a cleaned + // path would only end in a separator if it is the root). + cleanedPath += string(filepath.Separator) + } + cleanedPath += "." + } + + if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { + cleanedPath += string(filepath.Separator) + } + + return cleanedPath +} + +// assertsDirectory returns whether the given path is +// asserted to be a directory, i.e., the path ends with +// a trailing '/' or `/.`, assuming a path separator of `/`. +func assertsDirectory(path string) bool { + return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) +} + +// hasTrailingPathSeparator returns whether the given +// path ends with the system's path separator character. +func hasTrailingPathSeparator(path string) bool { + return len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) +} + +// specifiesCurrentDir returns whether the given path specifies +// a "current directory", i.e., the last path segment is `.`. +func specifiesCurrentDir(path string) bool { + return filepath.Base(path) == "." +} + +// SplitPathDirEntry splits the given path between its directory name and its +// basename by first cleaning the path but preserves a trailing "." if the +// original path specified the current directory. +func SplitPathDirEntry(path string) (dir, base string) { + cleanedPath := filepath.Clean(normalizePath(path)) + + if specifiesCurrentDir(path) { + cleanedPath += string(filepath.Separator) + "." + } + + return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) +} + +// TarResource archives the resource described by the given CopyInfo to a Tar +// archive. A non-nil error is returned if sourcePath does not exist or is +// asserted to be a directory but exists as another type of file. +// +// This function acts as a convenient wrapper around TarWithOptions, which +// requires a directory as the source path. TarResource accepts either a +// directory or a file path and correctly sets the Tar options. +func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { + return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) +} + +// TarResourceRebase is like TarResource but renames the first path element of +// items in the resulting tar archive to match the given rebaseName if not "". 
+func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) { + sourcePath = normalizePath(sourcePath) + if err = fileutils.Lexists(sourcePath); err != nil { + // Catches the case where the source does not exist or is not a + // directory if asserted to be a directory, as this also causes an + // error. + return + } + + // Separate the source path between its directory and + // the entry in that directory which we are archiving. + sourceDir, sourceBase := SplitPathDirEntry(sourcePath) + + filter := []string{sourceBase} + + logrus.Debugf("copying %q from %q", sourceBase, sourceDir) + + return TarWithOptions(sourceDir, &TarOptions{ + Compression: Uncompressed, + IncludeFiles: filter, + IncludeSourceDir: true, + RebaseNames: map[string]string{ + sourceBase: rebaseName, + }, + }) +} + +// CopyInfo holds basic info about the source +// or destination path of a copy operation. +type CopyInfo struct { + Path string + Exists bool + IsDir bool + RebaseName string +} + +// CopyInfoSourcePath stats the given path to create a CopyInfo +// struct representing that resource for the source of an archive copy +// operation. The given path should be an absolute local path. A source path +// has all symlinks evaluated that appear before the last path separator ("/" +// on Unix). As it is to be a copy source, the path must exist. +func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { + // normalize the file path and then evaluate the symbol link + // we will use the target file instead of the symbol link if + // followLink is set + path = normalizePath(path) + + resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) + if err != nil { + return CopyInfo{}, err + } + + stat, err := os.Lstat(resolvedPath) + if err != nil { + return CopyInfo{}, err + } + + return CopyInfo{ + Path: resolvedPath, + Exists: true, + IsDir: stat.IsDir(), + RebaseName: rebaseName, + }, nil +} + +// CopyInfoDestinationPath stats the given path to create a CopyInfo +// struct representing that resource for the destination of an archive copy +// operation. The given path should be an absolute local path. +func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { + maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot. + path = normalizePath(path) + originalPath := path + + stat, err := os.Lstat(path) + + if err == nil && stat.Mode()&os.ModeSymlink == 0 { + // The path exists and is not a symlink. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil + } + + // While the path is a symlink. + for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { + if n > maxSymlinkIter { + // Don't follow symlinks more than this arbitrary number of times. + return CopyInfo{}, errors.New("too many symlinks in " + originalPath) + } + + // The path is a symbolic link. We need to evaluate it so that the + // destination of the copy operation is the link target and not the + // link itself. This is notably different than CopyInfoSourcePath which + // only evaluates symlinks before the last appearing path separator. + // Also note that it is okay if the last path element is a broken + // symlink as the copy operation should create the target. + var linkTarget string + + linkTarget, err = os.Readlink(path) + if err != nil { + return CopyInfo{}, err + } + + if !filepath.IsAbs(linkTarget) { + // Join with the parent directory. 
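+			// (A relative link target is interpreted relative to the directory
+			// containing the symlink, so it is resolved against the parent of
+			// the current path.)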
+ dstParent, _ := SplitPathDirEntry(path) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + path = linkTarget + stat, err = os.Lstat(path) + } + + if err != nil { + // It's okay if the destination path doesn't exist. We can still + // continue the copy operation if the parent directory exists. + if !os.IsNotExist(err) { + return CopyInfo{}, err + } + + // Ensure destination parent dir exists. + dstParent, _ := SplitPathDirEntry(path) + + parentDirStat, err := os.Lstat(dstParent) + if err != nil { + return CopyInfo{}, err + } + if !parentDirStat.IsDir() { + return CopyInfo{}, ErrNotDirectory + } + + return CopyInfo{Path: path}, nil + } + + // The path exists after resolving symlinks. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil +} + +// PrepareArchiveCopy prepares the given srcContent archive, which should +// contain the archived resource described by srcInfo, to the destination +// described by dstInfo. Returns the possibly modified content archive along +// with the path to the destination directory which it should be extracted to. +func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { + // Ensure in platform semantics + srcInfo.Path = normalizePath(srcInfo.Path) + dstInfo.Path = normalizePath(dstInfo.Path) + + // Separate the destination path between its directory and base + // components in case the source archive contents need to be rebased. + dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) + _, srcBase := SplitPathDirEntry(srcInfo.Path) + + switch { + case dstInfo.Exists && dstInfo.IsDir: + // The destination exists as a directory. No alteration + // to srcContent is needed as its contents can be + // simply extracted to the destination directory. + return dstInfo.Path, io.NopCloser(srcContent), nil + case dstInfo.Exists && srcInfo.IsDir: + // The destination exists as some type of file and the source + // content is a directory. This is an error condition since + // you cannot copy a directory to an existing file location. + return "", nil, ErrCannotCopyDir + case dstInfo.Exists: + // The destination exists as some type of file and the source content + // is also a file. The source content entry will have to be renamed to + // have a basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case srcInfo.IsDir: + // The destination does not exist and the source content is an archive + // of a directory. The archive should be extracted to the parent of + // the destination path instead, and when it is, the directory that is + // created as a result should take the name of the destination path. + // The source content entries will have to be renamed to have a + // basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case assertsDirectory(dstInfo.Path): + // The destination does not exist and is asserted to be created as a + // directory, but the source content is not a directory. This is an + // error condition since you cannot create a directory from a file + // source. 
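+		// (For instance, copying a regular file to a non-existent "dst/" ends
+		// up here, since the trailing separator asserts a directory.)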
+ return "", nil, ErrDirNotExists + default: + // The last remaining case is when the destination does not exist, is + // not asserted to be a directory, and the source content is not an + // archive of a directory. It this case, the destination file will need + // to be created when the archive is extracted and the source content + // entry will have to be renamed to have a basename which matches the + // destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + } +} + +// RebaseArchiveEntries rewrites the given srcContent archive replacing +// an occurrence of oldBase with newBase at the beginning of entry names. +func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser { + if oldBase == string(os.PathSeparator) { + // If oldBase specifies the root directory, use an empty string as + // oldBase instead so that newBase doesn't replace the path separator + // that all paths will start with. + oldBase = "" + } + + rebased, w := io.Pipe() + + go func() { + srcTar := tar.NewReader(srcContent) + rebasedTar := tar.NewWriter(w) + + for { + hdr, err := srcTar.Next() + if err == io.EOF { + // Signals end of archive. + rebasedTar.Close() + w.Close() + return + } + if err != nil { + w.CloseWithError(err) + return + } + + hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) + if hdr.Typeflag == tar.TypeLink { + hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1) + } + + if err = rebasedTar.WriteHeader(hdr); err != nil { + w.CloseWithError(err) + return + } + + if _, err = io.Copy(rebasedTar, srcTar); err != nil { + w.CloseWithError(err) + return + } + } + }() + + return rebased +} + +// CopyResource performs an archive copy from the given source path to the +// given destination path. The source path MUST exist and the destination +// path's parent directory must exist. +func CopyResource(srcPath, dstPath string, followLink bool) error { + var ( + srcInfo CopyInfo + err error + ) + + // Ensure in platform semantics + srcPath = normalizePath(srcPath) + dstPath = normalizePath(dstPath) + + // Clean the source and destination paths. + srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath) + dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath) + + if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { + return err + } + + content, err := TarResource(srcInfo) + if err != nil { + return err + } + defer content.Close() + + return CopyTo(content, srcInfo, dstPath) +} + +// CopyTo handles extracting the given content whose +// entries should be sourced from srcInfo to dstPath. +func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error { + // The destination path need not exist, but CopyInfoDestinationPath will + // ensure that at least the parent directory exists. 
+	dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
+	if err != nil {
+		return err
+	}
+
+	dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
+	if err != nil {
+		return err
+	}
+	defer copyArchive.Close()
+
+	options := &TarOptions{
+		NoLchown:             true,
+		NoOverwriteDirNonDir: true,
+	}
+
+	return Untar(copyArchive, dstDir, options)
+}
+
+// ResolveHostSourcePath decides which real path needs to be copied, depending on
+// whether symlinks should be followed. If followLink is true, resolvedPath is the
+// fully evaluated link target of any symlink in path; otherwise only symlinks in
+// the parent directory are resolved, and a symlink as the last path element is
+// returned as-is, without being resolved.
+func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
+	if followLink {
+		resolvedPath, err = filepath.EvalSymlinks(path)
+		if err != nil {
+			return
+		}
+
+		resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
+	} else {
+		dirPath, basePath := filepath.Split(path)
+
+		// if not following the final symlink, resolve only symlinks in the parent dir
+		var resolvedDirPath string
+		resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
+		if err != nil {
+			return
+		}
+		// resolvedDirPath will have been cleaned (no trailing path separators) so
+		// we can manually join it with the base path element.
+		resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+		if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
+			rebaseName = filepath.Base(path)
+		}
+	}
+	return resolvedPath, rebaseName, nil
+}
+
+// GetRebaseName normalizes and compares path and resolvedPath, and returns the
+// completed resolved path and the rebased file name.
+func GetRebaseName(path, resolvedPath string) (string, string) {
+	// resolvedPath (the evaluated link target) will have been cleaned (no
+	// trailing path separators or "." segment), so re-add them when the
+	// original path had them.
+	var rebaseName string
+	if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) {
+		resolvedPath += string(filepath.Separator) + "."
+	}
+
+	if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) {
+		resolvedPath += string(filepath.Separator)
+	}
+
+	if filepath.Base(path) != filepath.Base(resolvedPath) {
+		// In the case where the path had a trailing separator and a symlink
+		// evaluation has changed the last path component, we will need to
+		// rebase the name in the archive that is being copied to match the
+		// originally requested name.
+ rebaseName = filepath.Base(path) + } + return resolvedPath, rebaseName +} diff --git a/vendor/github.com/containers/storage/pkg/archive/copy_unix.go b/vendor/github.com/containers/storage/pkg/archive/copy_unix.go new file mode 100644 index 0000000000..f579282449 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/copy_unix.go @@ -0,0 +1,11 @@ +//go:build !windows + +package archive + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.ToSlash(path) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/copy_windows.go b/vendor/github.com/containers/storage/pkg/archive/copy_windows.go new file mode 100644 index 0000000000..2b775b45c4 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/copy_windows.go @@ -0,0 +1,9 @@ +package archive + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.FromSlash(path) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go new file mode 100644 index 0000000000..6f3067770c --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/diff.go @@ -0,0 +1,274 @@ +package archive + +import ( + "archive/tar" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/containers/storage/pkg/fileutils" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/pools" + "github.com/containers/storage/pkg/system" + "github.com/sirupsen/logrus" +) + +// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { + tr := tar.NewReader(layer) + trBuf := pools.BufioReader32KPool.Get(tr) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + unpackedPaths := make(map[string]struct{}) + + if options == nil { + options = &TarOptions{} + } + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + + aufsTempdir := "" + aufsHardlinks := make(map[string]*tar.Header) + buffer := make([]byte, 1<<20) + + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return 0, err + } + + size += hdr.Size + + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) + + // Windows does not support filenames with colons in them. Ignore + // these files. This is not a problem though (although it might + // appear that it is). Let's suppose a client is running docker pull. + // The daemon it points to is Windows. Would it make sense for the + // client to be doing a docker pull Ubuntu for example (which has files + // with colons in the name under /usr/share/man/man3)? No, absolutely + // not as it would really only make sense that they were pulling a + // Windows image. However, for development, it is necessary to be able + // to pull Linux images which are in the repository. + // + // TODO Windows. Once the registry is aware of what images are Windows- + // specific or Linux-specific, this warning should be changed to an error + // to cater for the situation where someone does manage to upload a Linux + // image but have it tagged as Windows inadvertently. 
+ if runtime.GOOS == windows { + if strings.Contains(hdr.Name, ":") { + logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) + continue + } + } + + // Note as these operations are platform specific, so must the slash be. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists. + // This happened in some tests where an image had a tarfile without any + // parent directories. + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + + if err := fileutils.Lexists(parentPath); err != nil && os.IsNotExist(err) { + err = os.MkdirAll(parentPath, 0o755) + if err != nil { + return 0, err + } + } + } + + // Skip AUFS metadata dirs + if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) { + // Regular files inside /.wh..wh.plnk can be used as hardlink targets + // We don't want this directory, but we need the files in them so that + // such hardlinks can be resolved. + if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { + basename := filepath.Base(hdr.Name) + aufsHardlinks[basename] = hdr + if aufsTempdir == "" { + if aufsTempdir, err = os.MkdirTemp("", "storageplnk"); err != nil { + return 0, err + } + defer os.RemoveAll(aufsTempdir) + } + if err := extractTarFileEntry(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil { + return 0, err + } + } + + if hdr.Name != WhiteoutOpaqueDir { + continue + } + } + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return 0, err + } + + // Note as these operations are platform specific, so must the slash be. + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + base := filepath.Base(path) + + if strings.HasPrefix(base, WhiteoutPrefix) { + dir := filepath.Dir(path) + if base == WhiteoutOpaqueDir { + err := fileutils.Lexists(dir) + if err != nil { + return 0, err + } + err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { + if err != nil { + if os.IsNotExist(err) { + err = nil // parent was deleted + } + return err + } + if path == dir { + return nil + } + if _, exists := unpackedPaths[path]; !exists { + if err := resetImmutable(path, nil); err != nil { + return err + } + err := os.RemoveAll(path) + return err + } + return nil + }) + if err != nil { + return 0, err + } + } else { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + if err := resetImmutable(originalPath, nil); err != nil { + return 0, err + } + if err := os.RemoveAll(originalPath); err != nil { + return 0, err + } + } + } else { + // If path exits we almost always just want to remove and replace it. + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + // + // We always reset the immutable flag (if present) to allow metadata + // changes and to allow directory modification. The flag will be + // re-applied based on the contents of hdr either at the end for + // directories or in extractTarFileEntry otherwise. 
+ if fi, err := os.Lstat(path); err == nil { + if err := resetImmutable(path, &fi); err != nil { + return 0, err + } + if !fi.IsDir() || hdr.Typeflag != tar.TypeDir { + if err := os.RemoveAll(path); err != nil { + return 0, err + } + } + } + + trBuf.Reset(tr) + srcData := io.Reader(trBuf) + srcHdr := hdr + + // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so + // we manually retarget these into the temporary files we extracted them into + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { + linkBasename := filepath.Base(hdr.Linkname) + srcHdr = aufsHardlinks[linkBasename] + if srcHdr == nil { + return 0, fmt.Errorf("invalid aufs hardlink") + } + tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) + if err != nil { + return 0, err + } + defer tmpFile.Close() + srcData = tmpFile + } + + if err := remapIDs(nil, idMappings, options.ChownOpts, srcHdr); err != nil { + return 0, err + } + + if err := extractTarFileEntry(path, dest, srcHdr, srcData, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil { + return 0, err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + unpackedPaths[path] = struct{}{} + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return 0, err + } + if err := WriteFileFlagsFromTarHeader(path, hdr); err != nil { + return 0, err + } + } + + return size, nil +} + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (int64, error) { + return applyLayerHandler(dest, layer, &TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} + +// do the bulk load of ApplyLayer, but allow for not calling DecompressStream +func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { + dest = filepath.Clean(dest) + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + if err != nil { + return 0, err + } + defer func() { + _, _ = system.Umask(oldmask) // Ignore err. This can only fail with ErrNotSupportedPlatform, in which case we would have failed above. 
+ }() + + if decompress { + layer, err = DecompressStream(layer) + if err != nil { + return 0, err + } + } + return UnpackLayer(dest, layer, options) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go b/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go new file mode 100644 index 0000000000..4da1ced3e0 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go @@ -0,0 +1,166 @@ +//go:build freebsd + +package archive + +import ( + "archive/tar" + "fmt" + "math/bits" + "os" + "strings" + "syscall" + + "github.com/containers/storage/pkg/system" +) + +const ( + paxSCHILYFflags = "SCHILY.fflags" +) + +var ( + flagNameToValue = map[string]uint32{ + "sappnd": system.SF_APPEND, + "sappend": system.SF_APPEND, + "arch": system.SF_ARCHIVED, + "archived": system.SF_ARCHIVED, + "schg": system.SF_IMMUTABLE, + "schange": system.SF_IMMUTABLE, + "simmutable": system.SF_IMMUTABLE, + "sunlnk": system.SF_NOUNLINK, + "sunlink": system.SF_NOUNLINK, + "snapshot": system.SF_SNAPSHOT, + "uappnd": system.UF_APPEND, + "uappend": system.UF_APPEND, + "uarch": system.UF_ARCHIVE, + "uarchive": system.UF_ARCHIVE, + "hidden": system.UF_HIDDEN, + "uhidden": system.UF_HIDDEN, + "uchg": system.UF_IMMUTABLE, + "uchange": system.UF_IMMUTABLE, + "uimmutable": system.UF_IMMUTABLE, + "uunlnk": system.UF_NOUNLINK, + "uunlink": system.UF_NOUNLINK, + "offline": system.UF_OFFLINE, + "uoffline": system.UF_OFFLINE, + "opaque": system.UF_OPAQUE, + "rdonly": system.UF_READONLY, + "urdonly": system.UF_READONLY, + "readonly": system.UF_READONLY, + "ureadonly": system.UF_READONLY, + "reparse": system.UF_REPARSE, + "ureparse": system.UF_REPARSE, + "sparse": system.UF_SPARSE, + "usparse": system.UF_SPARSE, + "system": system.UF_SYSTEM, + "usystem": system.UF_SYSTEM, + } + // Only include the short names for the reverse map + flagValueToName = map[uint32]string{ + system.SF_APPEND: "sappnd", + system.SF_ARCHIVED: "arch", + system.SF_IMMUTABLE: "schg", + system.SF_NOUNLINK: "sunlnk", + system.SF_SNAPSHOT: "snapshot", + system.UF_APPEND: "uappnd", + system.UF_ARCHIVE: "uarch", + system.UF_HIDDEN: "hidden", + system.UF_IMMUTABLE: "uchg", + system.UF_NOUNLINK: "uunlnk", + system.UF_OFFLINE: "offline", + system.UF_OPAQUE: "opaque", + system.UF_READONLY: "rdonly", + system.UF_REPARSE: "reparse", + system.UF_SPARSE: "sparse", + system.UF_SYSTEM: "system", + } +) + +func parseFileFlags(fflags string) (uint32, uint32, error) { + var set, clear uint32 = 0, 0 + for _, fflag := range strings.Split(fflags, ",") { + isClear := false + if clean, ok := strings.CutPrefix(fflag, "no"); ok { + isClear = true + fflag = clean + } + if value, ok := flagNameToValue[fflag]; ok { + if isClear { + clear |= value + } else { + set |= value + } + } else { + return 0, 0, fmt.Errorf("parsing file flags, unrecognised token: %s", fflag) + } + } + return set, clear, nil +} + +func formatFileFlags(fflags uint32) (string, error) { + res := []string{} + for fflags != 0 { + // Extract lowest set bit + fflag := uint32(1) << bits.TrailingZeros32(fflags) + if name, ok := flagValueToName[fflag]; ok { + res = append(res, name) + } else { + return "", fmt.Errorf("formatting file flags, unrecognised flag: %x", fflag) + } + fflags &= ^fflag + } + return strings.Join(res, ","), nil +} + +func ReadFileFlagsToTarHeader(path string, hdr *tar.Header) error { + st, err := system.Lstat(path) + if err != nil { + return err + } + fflags, err := formatFileFlags(st.Flags()) + if err != nil { + return err + } + if fflags != "" { + if 
hdr.PAXRecords == nil { + hdr.PAXRecords = map[string]string{} + } + hdr.PAXRecords[paxSCHILYFflags] = fflags + } + return nil +} + +func WriteFileFlagsFromTarHeader(path string, hdr *tar.Header) error { + if fflags, ok := hdr.PAXRecords[paxSCHILYFflags]; ok { + var set, clear uint32 + set, clear, err := parseFileFlags(fflags) + if err != nil { + return err + } + + // Apply the delta to the existing file flags + st, err := system.Lstat(path) + if err != nil { + return err + } + return system.Lchflags(path, (st.Flags() & ^clear)|set) + } + return nil +} + +func resetImmutable(path string, fi *os.FileInfo) error { + var flags uint32 + if fi != nil { + flags = (*fi).Sys().(*syscall.Stat_t).Flags + } else { + st, err := system.Lstat(path) + if err != nil { + return err + } + flags = st.Flags() + } + if flags&(system.SF_IMMUTABLE|system.UF_IMMUTABLE) != 0 { + flags &= ^(system.SF_IMMUTABLE | system.UF_IMMUTABLE) + return system.Lchflags(path, flags) + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go b/vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go new file mode 100644 index 0000000000..5ad480f7c7 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go @@ -0,0 +1,20 @@ +//go:build !freebsd + +package archive + +import ( + "archive/tar" + "os" +) + +func ReadFileFlagsToTarHeader(path string, hdr *tar.Header) error { + return nil +} + +func WriteFileFlagsFromTarHeader(path string, hdr *tar.Header) error { + return nil +} + +func resetImmutable(path string, fi *os.FileInfo) error { + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/filter.go b/vendor/github.com/containers/storage/pkg/archive/filter.go new file mode 100644 index 0000000000..9902a1ef57 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/filter.go @@ -0,0 +1,73 @@ +package archive + +import ( + "bytes" + "fmt" + "io" + "os/exec" + "strings" + "sync" +) + +var filterPath sync.Map + +func getFilterPath(name string) string { + path, ok := filterPath.Load(name) + if ok { + return path.(string) + } + + path, err := exec.LookPath(name) + if err != nil { + path = "" + } + + filterPath.Store(name, path) + return path.(string) +} + +type errorRecordingReader struct { + r io.Reader + err error +} + +func (r *errorRecordingReader) Read(p []byte) (int, error) { + n, err := r.r.Read(p) + if r.err == nil && err != io.EOF { + r.err = err + } + return n, err +} + +// tryProcFilter tries to run the command specified in args, passing input to its stdin and returning its stdout. +// cleanup() is a caller provided function that will be called when the command finishes running, regardless of +// whether it succeeds or fails. +// If the command is not found, it returns (nil, false) and the cleanup function is not called. +func tryProcFilter(args []string, input io.Reader, cleanup func()) (io.ReadCloser, bool) { + path := getFilterPath(args[0]) + if path == "" { + return nil, false + } + + var stderrBuf bytes.Buffer + + inputWithError := &errorRecordingReader{r: input} + + r, w := io.Pipe() + cmd := exec.Command(path, args[1:]...) 
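+	// Wire up the filter process: it consumes the archive stream on stdin and
+	// emits the filtered stream on stdout (the read end of the pipe returned
+	// to the caller); stderr is buffered for error reporting.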
+ cmd.Stdin = inputWithError + cmd.Stdout = w + cmd.Stderr = &stderrBuf + go func() { + err := cmd.Run() + // if there is an error reading from input, prefer to return that error + if inputWithError.err != nil { + err = inputWithError.err + } else if err != nil && stderrBuf.Len() > 0 { + err = fmt.Errorf("%s: %w", strings.TrimRight(stderrBuf.String(), "\n"), err) + } + w.CloseWithError(err) // CloseWithErr(nil) == Close() + cleanup() + }() + return r, true +} diff --git a/vendor/github.com/containers/storage/pkg/archive/time_linux.go b/vendor/github.com/containers/storage/pkg/archive/time_linux.go new file mode 100644 index 0000000000..3448569b1e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/time_linux.go @@ -0,0 +1,16 @@ +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = ((1 << 30) - 2) + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go b/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go new file mode 100644 index 0000000000..3555d753a1 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go @@ -0,0 +1,16 @@ +//go:build !linux + +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/whiteouts.go b/vendor/github.com/containers/storage/pkg/archive/whiteouts.go new file mode 100644 index 0000000000..d20478a10d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/whiteouts.go @@ -0,0 +1,23 @@ +package archive + +// Whiteouts are files with a special meaning for the layered filesystem. +// Docker uses AUFS whiteout files inside exported archives. In other +// filesystems these files are generated/handled on tar creation/extraction. + +// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a +// filename this means that file has been removed from the base layer. +const WhiteoutPrefix = ".wh." + +// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not +// for removing an actual file. Normally these files are excluded from exported +// archives. +const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix + +// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other +// layers. Normally these should not go into exported archives and all changed +// hardlinks should be copied to the top layer. +const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" + +// WhiteoutOpaqueDir file means directory has been made opaque - meaning +// readdir calls to this directory do not follow to lower layers. +const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/vendor/github.com/containers/storage/pkg/archive/wrap.go b/vendor/github.com/containers/storage/pkg/archive/wrap.go new file mode 100644 index 0000000000..903befd763 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/wrap.go @@ -0,0 +1,59 @@ +package archive + +import ( + "archive/tar" + "bytes" + "io" +) + +// Generate generates a new archive from the content provided +// as input. +// +// `files` is a sequence of path/content pairs. A new file is +// added to the archive for each pair. 
+// If the last pair is incomplete, the file is created with an
+// empty content. For example:
+//
+// Generate("foo.txt", "hello world", "emptyfile")
+//
+// The above call will return an archive with 2 files:
+// - ./foo.txt with content "hello world"
+// - ./emptyfile with empty content
+//
+// FIXME: stream content instead of buffering
+// FIXME: specify permissions and other archive metadata
+func Generate(input ...string) (io.Reader, error) {
+	files := parseStringPairs(input...)
+	buf := new(bytes.Buffer)
+	tw := tar.NewWriter(buf)
+	for _, file := range files {
+		name, content := file[0], file[1]
+		hdr := &tar.Header{
+			Name: name,
+			Size: int64(len(content)),
+		}
+		if err := tw.WriteHeader(hdr); err != nil {
+			return nil, err
+		}
+		if _, err := tw.Write([]byte(content)); err != nil {
+			return nil, err
+		}
+	}
+	if err := tw.Close(); err != nil {
+		return nil, err
+	}
+	return buf, nil
+}
+
+func parseStringPairs(input ...string) (output [][2]string) {
+	output = make([][2]string, 0, len(input)/2+1)
+	for i := 0; i < len(input); i += 2 {
+		var pair [2]string
+		pair[0] = input[i]
+		if i+1 < len(input) {
+			pair[1] = input[i+1]
+		}
+		output = append(output, pair)
+	}
+	return
+}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
new file mode 100644
index 0000000000..0de063a24c
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
@@ -0,0 +1,497 @@
+package compressor
+
+// NOTE: This is used from github.com/containers/image by callers that
+// don't otherwise use containers/storage, so don't make this depend on any
+// larger software like the graph drivers.
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+
+	"github.com/containers/storage/pkg/chunked/internal/minimal"
+	"github.com/containers/storage/pkg/ioutils"
+	"github.com/klauspost/compress/zstd"
+	"github.com/opencontainers/go-digest"
+	"github.com/vbatts/tar-split/archive/tar"
+	"github.com/vbatts/tar-split/tar/asm"
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+const (
+	RollsumBits    = 16
+	holesThreshold = int64(1 << 10)
+)
+
+type holesFinder struct {
+	reader    *bufio.Reader
+	zeros     int64
+	threshold int64
+
+	state int
+}
+
+const (
+	holesFinderStateRead = iota
+	holesFinderStateAccumulate
+	holesFinderStateFound
+	holesFinderStateEOF
+)
+
+// readByte reads a single byte from the underlying reader.
+// If a single byte is read, the return value is (0, RAW-BYTE-VALUE, nil).
+// If there are at least f.threshold consecutive zeros, then the
+// return value is (N_CONSECUTIVE_ZEROS, '\x00', nil).
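+// For example, with the default 1 KiB threshold, a run of 4096 zero bytes
+// followed by data is reported once as (4096, 0, nil) rather than byte by
+// byte, and the first non-zero byte is pushed back for the next call.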
+func (f *holesFinder) readByte() (int64, byte, error) { + for { + switch f.state { + // reading the file stream + case holesFinderStateRead: + if f.zeros > 0 { + f.zeros-- + return 0, 0, nil + } + b, err := f.reader.ReadByte() + if err != nil { + return 0, b, err + } + + if b != 0 { + return 0, b, err + } + + f.zeros = 1 + if f.zeros == f.threshold { + f.state = holesFinderStateFound + } else { + f.state = holesFinderStateAccumulate + } + // accumulating zeros, but still didn't reach the threshold + case holesFinderStateAccumulate: + b, err := f.reader.ReadByte() + if err != nil { + if err == io.EOF { + f.state = holesFinderStateEOF + continue + } + return 0, b, err + } + + if b == 0 { + f.zeros++ + if f.zeros == f.threshold { + f.state = holesFinderStateFound + } + } else { + if err := f.reader.UnreadByte(); err != nil { + return 0, 0, err + } + f.state = holesFinderStateRead + } + // found a hole. Number of zeros >= threshold + case holesFinderStateFound: + b, err := f.reader.ReadByte() + if err != nil { + if err == io.EOF { + f.state = holesFinderStateEOF + } + holeLen := f.zeros + f.zeros = 0 + return holeLen, 0, nil + } + if b != 0 { + if err := f.reader.UnreadByte(); err != nil { + return 0, 0, err + } + f.state = holesFinderStateRead + + holeLen := f.zeros + f.zeros = 0 + return holeLen, 0, nil + } + f.zeros++ + // reached EOF. Flush pending zeros if any. + case holesFinderStateEOF: + if f.zeros > 0 { + f.zeros-- + return 0, 0, nil + } + return 0, 0, io.EOF + } + } +} + +type rollingChecksumReader struct { + reader *holesFinder + closed bool + rollsum *RollSum + pendingHole int64 + + // WrittenOut is the total number of bytes read from + // the stream. + WrittenOut int64 + + // IsLastChunkZeros tells whether the last generated + // chunk is a hole (made of consecutive zeros). If it + // is false, then the last chunk is a data chunk + // generated by the rolling checksum. 
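+	// (writeZstdChunkedStream uses this to record the chunk as
+	// ChunkTypeZeros instead of ChunkTypeData in the TOC.)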
+ IsLastChunkZeros bool +} + +func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) { + rc.IsLastChunkZeros = false + + if rc.pendingHole > 0 { + toCopy := min(rc.pendingHole, int64(len(b))) + rc.pendingHole -= toCopy + for i := int64(0); i < toCopy; i++ { + b[i] = 0 + } + + rc.WrittenOut += toCopy + + rc.IsLastChunkZeros = true + + // if there are no other zeros left, terminate the chunk + return rc.pendingHole == 0, int(toCopy), nil + } + + if rc.closed { + return false, 0, io.EOF + } + + for i := range b { + holeLen, n, err := rc.reader.readByte() + if err != nil { + if err == io.EOF { + rc.closed = true + if i == 0 { + return false, 0, err + } + return false, i, nil + } + // Report any other error type + return false, -1, err + } + if holeLen > 0 { + for j := int64(0); j < holeLen; j++ { + rc.rollsum.Roll(0) + } + rc.pendingHole = holeLen + return true, i, nil + } + b[i] = n + rc.WrittenOut++ + rc.rollsum.Roll(n) + if rc.rollsum.OnSplitWithBits(RollsumBits) { + return true, i + 1, nil + } + } + return false, len(b), nil +} + +type chunk struct { + ChunkOffset int64 + Offset int64 + Checksum string + ChunkSize int64 + ChunkType string +} + +type tarSplitData struct { + compressed *bytes.Buffer + digester digest.Digester + uncompressedCounter *ioutils.WriteCounter + zstd *zstd.Encoder + packer storage.Packer +} + +func newTarSplitData(level int) (*tarSplitData, error) { + compressed := bytes.NewBuffer(nil) + digester := digest.Canonical.Digester() + + zstdWriter, err := minimal.ZstdWriterWithLevel(io.MultiWriter(compressed, digester.Hash()), level) + if err != nil { + return nil, err + } + + uncompressedCounter := ioutils.NewWriteCounter(zstdWriter) + metaPacker := storage.NewJSONPacker(uncompressedCounter) + + return &tarSplitData{ + compressed: compressed, + digester: digester, + uncompressedCounter: uncompressedCounter, + zstd: zstdWriter, + packer: metaPacker, + }, nil +} + +func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error { + // total written so far. 
Used to retrieve partial offsets in the file
+	dest := ioutils.NewWriteCounter(destFile)
+
+	tarSplitData, err := newTarSplitData(level)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if tarSplitData.zstd != nil {
+			tarSplitData.zstd.Close()
+		}
+	}()
+
+	its, err := asm.NewInputTarStream(reader, tarSplitData.packer, nil)
+	if err != nil {
+		return err
+	}
+
+	tr := tar.NewReader(its)
+	tr.RawAccounting = true
+
+	buf := make([]byte, 4096)
+
+	zstdWriter, err := minimal.ZstdWriterWithLevel(dest, level)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if zstdWriter != nil {
+			zstdWriter.Close()
+		}
+	}()
+
+	restartCompression := func() (int64, error) {
+		var offset int64
+		if zstdWriter != nil {
+			if err := zstdWriter.Close(); err != nil {
+				return 0, err
+			}
+			offset = dest.Count
+			zstdWriter.Reset(dest)
+		}
+		return offset, nil
+	}
+
+	var metadata []minimal.FileMetadata
+	for {
+		hdr, err := tr.Next()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+
+		rawBytes := tr.RawBytes()
+		if _, err := zstdWriter.Write(rawBytes); err != nil {
+			return err
+		}
+
+		payloadDigester := digest.Canonical.Digester()
+		chunkDigester := digest.Canonical.Digester()
+
+		// Now handle the payload, if any
+		startOffset := int64(0)
+		lastOffset := int64(0)
+		lastChunkOffset := int64(0)
+
+		checksum := ""
+
+		chunks := []chunk{}
+
+		hf := &holesFinder{
+			threshold: holesThreshold,
+			reader:    bufio.NewReader(tr),
+		}
+
+		rcReader := &rollingChecksumReader{
+			reader:  hf,
+			rollsum: NewRollSum(),
+		}
+
+		payloadDest := io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter)
+		for {
+			mustSplit, read, errRead := rcReader.Read(buf)
+			if errRead != nil && errRead != io.EOF {
+				return errRead
+			}
+			// restart the compression only if there is a payload.
+			if read > 0 {
+				if startOffset == 0 {
+					startOffset, err = restartCompression()
+					if err != nil {
+						return err
+					}
+					lastOffset = startOffset
+				}
+
+				if _, err := payloadDest.Write(buf[:read]); err != nil {
+					return err
+				}
+			}
+			if (mustSplit || errRead == io.EOF) && startOffset > 0 {
+				off, err := restartCompression()
+				if err != nil {
+					return err
+				}
+
+				chunkSize := rcReader.WrittenOut - lastChunkOffset
+				if chunkSize > 0 {
+					chunkType := minimal.ChunkTypeData
+					if rcReader.IsLastChunkZeros {
+						chunkType = minimal.ChunkTypeZeros
+					}
+
+					chunks = append(chunks, chunk{
+						ChunkOffset: lastChunkOffset,
+						Offset:      lastOffset,
+						Checksum:    chunkDigester.Digest().String(),
+						ChunkSize:   chunkSize,
+						ChunkType:   chunkType,
+					})
+				}
+
+				lastOffset = off
+				lastChunkOffset = rcReader.WrittenOut
+				chunkDigester = digest.Canonical.Digester()
+				payloadDest = io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter)
+			}
+			if errRead == io.EOF {
+				if startOffset > 0 {
+					checksum = payloadDigester.Digest().String()
+				}
+				break
+			}
+		}
+
+		mainEntry, err := minimal.NewFileMetadata(hdr)
+		if err != nil {
+			return err
+		}
+		mainEntry.Digest = checksum
+		mainEntry.Offset = startOffset
+		mainEntry.EndOffset = lastOffset
+		entries := []minimal.FileMetadata{mainEntry}
+		for i := 1; i < len(chunks); i++ {
+			entries = append(entries, minimal.FileMetadata{
+				Type:        minimal.TypeChunk,
+				Name:        hdr.Name,
+				ChunkOffset: chunks[i].ChunkOffset,
+			})
+		}
+		if len(chunks) > 1 {
+			for i := range chunks {
+				entries[i].ChunkSize = chunks[i].ChunkSize
+				entries[i].Offset = chunks[i].Offset
+				entries[i].ChunkDigest = chunks[i].Checksum
+				entries[i].ChunkType = chunks[i].ChunkType
+			}
+		}
+		metadata = append(metadata, entries...)
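+		// At this point entries holds one TypeReg entry for the whole file,
+		// followed by one TypeChunk entry per additional chunk whenever the
+		// payload was split; the Chunk* fields are filled in on all of them.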
+ } + + rawBytes := tr.RawBytes() + if _, err := zstdWriter.Write(rawBytes); err != nil { + zstdWriter.Close() + return err + } + + // make sure the entire tarball is flushed to the output as it might contain + // some trailing zeros that affect the checksum. + if _, err := io.Copy(zstdWriter, its); err != nil { + zstdWriter.Close() + return err + } + + if err := zstdWriter.Flush(); err != nil { + zstdWriter.Close() + return err + } + if err := zstdWriter.Close(); err != nil { + return err + } + zstdWriter = nil + + if err := tarSplitData.zstd.Flush(); err != nil { + return err + } + if err := tarSplitData.zstd.Close(); err != nil { + return err + } + tarSplitData.zstd = nil + + ts := minimal.TarSplitData{ + Data: tarSplitData.compressed.Bytes(), + Digest: tarSplitData.digester.Digest(), + UncompressedSize: tarSplitData.uncompressedCounter.Count, + } + + return minimal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), &ts, metadata, level) +} + +type zstdChunkedWriter struct { + tarSplitOut *io.PipeWriter + tarSplitErr chan error +} + +func (w zstdChunkedWriter) Close() error { + errClose := w.tarSplitOut.Close() + + if err := <-w.tarSplitErr; err != nil && err != io.EOF { + return err + } + return errClose +} + +func (w zstdChunkedWriter) Write(p []byte) (int, error) { + select { + case err := <-w.tarSplitErr: + w.tarSplitOut.Close() + return 0, err + default: + return w.tarSplitOut.Write(p) + } +} + +// zstdChunkedWriterWithLevel writes a zstd compressed tarball where each file is +// compressed separately so it can be addressed separately. Idea based on CRFS: +// https://github.com/google/crfs +// The difference with CRFS is that the zstd compression is used instead of gzip. +// The reason for it is that zstd supports embedding metadata ignored by the decoder +// as part of the compressed stream. +// A manifest json file with all the metadata is appended at the end of the tarball +// stream, using zstd skippable frames. +// The final file will look like: +// [FILE_1][FILE_2]..[FILE_N][SKIPPABLE FRAME 1][SKIPPABLE FRAME 2] +// Where: +// [FILE_N]: [ZSTD HEADER][TAR HEADER][PAYLOAD FILE_N][ZSTD FOOTER] +// [SKIPPABLE FRAME 1]: [ZSTD SKIPPABLE FRAME, SIZE=MANIFEST LENGTH][MANIFEST] +// [SKIPPABLE FRAME 2]: [ZSTD SKIPPABLE FRAME, SIZE=16][MANIFEST_OFFSET][MANIFEST_LENGTH][MANIFEST_LENGTH_UNCOMPRESSED][MANIFEST_TYPE][CHUNKED_ZSTD_MAGIC_NUMBER] +// MANIFEST_OFFSET, MANIFEST_LENGTH, MANIFEST_LENGTH_UNCOMPRESSED and CHUNKED_ZSTD_MAGIC_NUMBER are 64 bits unsigned in little endian format. +func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level int) (io.WriteCloser, error) { + ch := make(chan error, 1) + r, w := io.Pipe() + + go func() { + ch <- writeZstdChunkedStream(out, metadata, r, level) + _, _ = io.Copy(io.Discard, r) // Ordinarily writeZstdChunkedStream consumes all of r. If it fails, ensure the write end never blocks and eventually terminates. + r.Close() + close(ch) + }() + + return zstdChunkedWriter{ + tarSplitOut: w, + tarSplitErr: ch, + }, nil +} + +// ZstdCompressor is a CompressorFunc for the zstd compression algorithm. 
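+//
+// A minimal illustrative use (dst, annotations, and tarStream are hypothetical
+// names supplied by the caller); passing a nil level selects the default
+// compression level of 10:
+//
+//	annotations := map[string]string{}
+//	wc, err := ZstdCompressor(dst, annotations, nil)
+//	if err != nil {
+//		return err
+//	}
+//	defer wc.Close()
+//	_, err = io.Copy(wc, tarStream)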
+func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) { + if level == nil { + l := 10 + level = &l + } + + return zstdChunkedWriterWithLevel(r, metadata, *level) +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go new file mode 100644 index 0000000000..59df6901e8 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go @@ -0,0 +1,85 @@ +/* +Copyright 2011 The Perkeep Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package rollsum implements rolling checksums similar to apenwarr's bup, which +// is similar to librsync. +// +// The bup project is at https://github.com/apenwarr/bup and its splitting in +// particular is at https://github.com/apenwarr/bup/blob/master/lib/bup/bupsplit.c +package compressor + +import ( + "math/bits" +) + +const ( + windowSize = 64 // Roll assumes windowSize is a power of 2 + charOffset = 31 +) + +const ( + blobBits = 13 + blobSize = 1 << blobBits // 8k +) + +type RollSum struct { + s1, s2 uint32 + window [windowSize]uint8 + wofs int +} + +func NewRollSum() *RollSum { + return &RollSum{ + s1: windowSize * charOffset, + s2: windowSize * (windowSize - 1) * charOffset, + } +} + +func (rs *RollSum) add(drop, add uint32) { + s1 := rs.s1 + add - drop + rs.s1 = s1 + rs.s2 += s1 - uint32(windowSize)*(drop+charOffset) +} + +// Roll adds ch to the rolling sum. +func (rs *RollSum) Roll(ch byte) { + wp := &rs.window[rs.wofs] + rs.add(uint32(*wp), uint32(ch)) + *wp = ch + rs.wofs = (rs.wofs + 1) & (windowSize - 1) +} + +// OnSplit reports whether at least 13 consecutive trailing bits of +// the current checksum are set the same way. +func (rs *RollSum) OnSplit() bool { + return (rs.s2 & (blobSize - 1)) == ((^0) & (blobSize - 1)) +} + +// OnSplitWithBits reports whether at least n consecutive trailing bits +// of the current checksum are set the same way. +func (rs *RollSum) OnSplitWithBits(n uint32) bool { + mask := (uint32(1) << n) - 1 + return rs.s2&mask == (^uint32(0))&mask +} + +func (rs *RollSum) Bits() int { + rsum := rs.Digest() >> (blobBits + 1) + return blobBits + bits.TrailingZeros32(^rsum) +} + +func (rs *RollSum) Digest() uint32 { + return (rs.s1 << 16) | (rs.s2 & 0xffff) +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/minimal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/minimal/compression.go new file mode 100644 index 0000000000..f85c5973ca --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/internal/minimal/compression.go @@ -0,0 +1,324 @@ +package minimal + +// NOTE: This is used from github.com/containers/image by callers that +// don't otherwise use containers/storage, so don't make this depend on any +// larger software like the graph drivers. 
+ +import ( + "bytes" + "encoding/base64" + "encoding/binary" + "fmt" + "io" + "strings" + "time" + + "github.com/containers/storage/pkg/archive" + jsoniter "github.com/json-iterator/go" + "github.com/klauspost/compress/zstd" + "github.com/opencontainers/go-digest" + "github.com/vbatts/tar-split/archive/tar" +) + +// TOC is short for Table of Contents and is used by the zstd:chunked +// file format to effectively add an overall index into the contents +// of a tarball; it also includes file metadata. +type TOC struct { + // Version is currently expected to be 1 + Version int `json:"version"` + // Entries is the list of file metadata in this TOC. + // The ordering in this array currently defaults to being the same + // as that of the tar stream; however, this should not be relied on. + Entries []FileMetadata `json:"entries"` + // TarSplitDigest is the checksum of the "tar-split" data which + // is included as a distinct skippable zstd frame before the TOC. + TarSplitDigest digest.Digest `json:"tarSplitDigest,omitempty"` +} + +// FileMetadata is an entry in the TOC that includes both generic file metadata +// that duplicates what can be found in the tar header (and should match), but +// also special/custom content (see below). +// +// Regular files may optionally be represented as a sequence of “chunks”, +// which may be ChunkTypeData or ChunkTypeZeros (and ChunkTypeData boundaries +// are heuristically determined to increase chance of chunk matching / reuse +// similar to rsync). In that case, the regular file is represented +// as an initial TypeReg entry (with all metadata for the file as a whole) +// immediately followed by zero or more TypeChunk entries (containing only Type, +// Name and Chunk* fields); if there is at least one TypeChunk entry, the Chunk* +// fields are relevant in all of these entries, including the initial +// TypeReg one. +// +// Note that the metadata here, when fetched by a zstd:chunked aware client, +// is used instead of that in the tar stream. The contents of the tar stream +// are not used in this scenario. +type FileMetadata struct { + // If you add any fields, update ensureFileMetadataMatches as well! + + // The metadata below largely duplicates that in the tar headers. + Type string `json:"type"` + Name string `json:"name"` + Linkname string `json:"linkName,omitempty"` + Mode int64 `json:"mode,omitempty"` + Size int64 `json:"size,omitempty"` + UID int `json:"uid,omitempty"` + GID int `json:"gid,omitempty"` + ModTime *time.Time `json:"modtime,omitempty"` + AccessTime *time.Time `json:"accesstime,omitempty"` + ChangeTime *time.Time `json:"changetime,omitempty"` + Devmajor int64 `json:"devMajor,omitempty"` + Devminor int64 `json:"devMinor,omitempty"` + Xattrs map[string]string `json:"xattrs,omitempty"` + // Digest is a hexadecimal sha256 checksum of the file contents; it + // is empty for empty files + Digest string `json:"digest,omitempty"` + Offset int64 `json:"offset,omitempty"` + EndOffset int64 `json:"endOffset,omitempty"` + + ChunkSize int64 `json:"chunkSize,omitempty"` + ChunkOffset int64 `json:"chunkOffset,omitempty"` + ChunkDigest string `json:"chunkDigest,omitempty"` + ChunkType string `json:"chunkType,omitempty"` +} + +const ( + ChunkTypeData = "" + ChunkTypeZeros = "zeros" +) + +const ( + // The following types correspond to regular types of entries that can + // appear in a tar archive.
+ TypeReg = "reg" + TypeLink = "hardlink" + TypeChar = "char" + TypeBlock = "block" + TypeDir = "dir" + TypeFifo = "fifo" + TypeSymlink = "symlink" + // TypeChunk is special; in zstd:chunked not only are files individually + // compressed and indexable, there is a "rolling checksum" used to compute + // "chunks" of individual file contents, that are also added to the TOC + TypeChunk = "chunk" +) + +var TarTypes = map[byte]string{ + tar.TypeReg: TypeReg, + tar.TypeLink: TypeLink, + tar.TypeChar: TypeChar, + tar.TypeBlock: TypeBlock, + tar.TypeDir: TypeDir, + tar.TypeFifo: TypeFifo, + tar.TypeSymlink: TypeSymlink, +} + +func GetType(t byte) (string, error) { + r, found := TarTypes[t] + if !found { + return "", fmt.Errorf("unknown tarball type: %v", t) + } + return r, nil +} + +const ( + // ManifestChecksumKey is a hexadecimal sha256 digest of the compressed manifest digest. + ManifestChecksumKey = "io.github.containers.zstd-chunked.manifest-checksum" + // ManifestInfoKey is an annotation that signals the start of the TOC (manifest) + // contents which are embedded as a skippable zstd frame. It has a format of + // four decimal integers separated by `:` as follows: + // ::: + // The is ManifestTypeCRFS which should have the value `1`. + ManifestInfoKey = "io.github.containers.zstd-chunked.manifest-position" + // TarSplitInfoKey is an annotation that signals the start of the "tar-split" metadata + // contents which are embedded as a skippable zstd frame. It has a format of + // three decimal integers separated by `:` as follows: + // :: + TarSplitInfoKey = "io.github.containers.zstd-chunked.tarsplit-position" + + // TarSplitChecksumKey is no longer used and is replaced by the TOC.TarSplitDigest field instead. + // The value is retained here as a constant as a historical reference for older zstd:chunked images. + // TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum" + + // ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file. + ManifestTypeCRFS = 1 + + // FooterSizeSupported is the footer size supported by this implementation. + // Newer versions of the image format might increase this value, so reject + // any version that is not supported. + FooterSizeSupported = 64 +) + +var ( + // when the zstd decoder encounters a skippable frame + 1 byte for the size, it + // will ignore it. 
+ // https://tools.ietf.org/html/rfc8478#section-3.1.2 + skippableFrameMagic = []byte{0x50, 0x2a, 0x4d, 0x18} + + ZstdChunkedFrameMagic = []byte{0x47, 0x4e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78} +) + +func appendZstdSkippableFrame(dest io.Writer, data []byte) error { + if _, err := dest.Write(skippableFrameMagic); err != nil { + return err + } + + size := make([]byte, 4) + binary.LittleEndian.PutUint32(size, uint32(len(data))) + if _, err := dest.Write(size); err != nil { + return err + } + if _, err := dest.Write(data); err != nil { + return err + } + return nil +} + +type TarSplitData struct { + Data []byte + Digest digest.Digest + UncompressedSize int64 +} + +func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, tarSplitData *TarSplitData, metadata []FileMetadata, level int) error { + // 8 is the size of the zstd skippable frame header + the frame size + const zstdSkippableFrameHeader = 8 + manifestOffset := offset + zstdSkippableFrameHeader + + toc := TOC{ + Version: 1, + Entries: metadata, + TarSplitDigest: tarSplitData.Digest, + } + + json := jsoniter.ConfigCompatibleWithStandardLibrary + // Generate the manifest + manifest, err := json.Marshal(toc) + if err != nil { + return err + } + + var compressedBuffer bytes.Buffer + zstdWriter, err := ZstdWriterWithLevel(&compressedBuffer, level) + if err != nil { + return err + } + if _, err := zstdWriter.Write(manifest); err != nil { + zstdWriter.Close() + return err + } + if err := zstdWriter.Close(); err != nil { + return err + } + compressedManifest := compressedBuffer.Bytes() + + manifestDigester := digest.Canonical.Digester() + manifestChecksum := manifestDigester.Hash() + if _, err := manifestChecksum.Write(compressedManifest); err != nil { + return err + } + + outMetadata[ManifestChecksumKey] = manifestDigester.Digest().String() + outMetadata[ManifestInfoKey] = fmt.Sprintf("%d:%d:%d:%d", manifestOffset, len(compressedManifest), len(manifest), ManifestTypeCRFS) + if err := appendZstdSkippableFrame(dest, compressedManifest); err != nil { + return err + } + + tarSplitOffset := manifestOffset + uint64(len(compressedManifest)) + zstdSkippableFrameHeader + outMetadata[TarSplitInfoKey] = fmt.Sprintf("%d:%d:%d", tarSplitOffset, len(tarSplitData.Data), tarSplitData.UncompressedSize) + if err := appendZstdSkippableFrame(dest, tarSplitData.Data); err != nil { + return err + } + + footer := ZstdChunkedFooterData{ + ManifestType: uint64(ManifestTypeCRFS), + Offset: manifestOffset, + LengthCompressed: uint64(len(compressedManifest)), + LengthUncompressed: uint64(len(manifest)), + OffsetTarSplit: tarSplitOffset, + LengthCompressedTarSplit: uint64(len(tarSplitData.Data)), + LengthUncompressedTarSplit: uint64(tarSplitData.UncompressedSize), + } + + manifestDataLE := footerDataToBlob(footer) + + return appendZstdSkippableFrame(dest, manifestDataLE) +} + +func ZstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) { + el := zstd.EncoderLevelFromZstd(level) + return zstd.NewWriter(dest, zstd.WithEncoderLevel(el)) +} + +// ZstdChunkedFooterData contains all the data stored in the zstd:chunked footer. +// This footer exists to make the blobs self-describing, our implementation +// never reads it: +// Partial pull security hinges on the TOC digest, and that exists as a layer annotation; +// so we are relying on the layer annotations anyway, and doing so means we can avoid +// a round-trip to fetch this binary footer. 
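For orientation, the 64-byte footer serialized by footerDataToBlob (just below) can be decoded by reversing the same field order. The following is a hypothetical sketch, not code from this diff: the vendored implementation deliberately never reads the footer, and footerFromBlob is an invented name. It assumes the surrounding file's existing imports (bytes, encoding/binary, fmt).

// footerFromBlob is a hypothetical inverse of footerDataToBlob; it is not
// part of the vendored code, which trusts the layer annotations instead.
func footerFromBlob(blob []byte) (ZstdChunkedFooterData, error) {
	var f ZstdChunkedFooterData
	if len(blob) != FooterSizeSupported {
		return f, fmt.Errorf("unexpected footer size %d, expected %d", len(blob), FooterSizeSupported)
	}
	// The last 8 bytes must carry the zstd:chunked magic number.
	if !bytes.Equal(blob[8*7:], ZstdChunkedFrameMagic) {
		return f, fmt.Errorf("invalid zstd:chunked footer magic")
	}
	f.Offset = binary.LittleEndian.Uint64(blob[8*0:])
	f.LengthCompressed = binary.LittleEndian.Uint64(blob[8*1:])
	f.LengthUncompressed = binary.LittleEndian.Uint64(blob[8*2:])
	f.ManifestType = binary.LittleEndian.Uint64(blob[8*3:])
	f.OffsetTarSplit = binary.LittleEndian.Uint64(blob[8*4:])
	f.LengthCompressedTarSplit = binary.LittleEndian.Uint64(blob[8*5:])
	f.LengthUncompressedTarSplit = binary.LittleEndian.Uint64(blob[8*6:])
	return f, nil
}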
+type ZstdChunkedFooterData struct { + ManifestType uint64 + + Offset uint64 + LengthCompressed uint64 + LengthUncompressed uint64 + + OffsetTarSplit uint64 + LengthCompressedTarSplit uint64 + LengthUncompressedTarSplit uint64 + ChecksumAnnotationTarSplit string // Deprecated: This field is not a part of the footer and not used for any purpose. +} + +func footerDataToBlob(footer ZstdChunkedFooterData) []byte { + // Store the offset to the manifest and its size in LE order + manifestDataLE := make([]byte, FooterSizeSupported) + binary.LittleEndian.PutUint64(manifestDataLE[8*0:], footer.Offset) + binary.LittleEndian.PutUint64(manifestDataLE[8*1:], footer.LengthCompressed) + binary.LittleEndian.PutUint64(manifestDataLE[8*2:], footer.LengthUncompressed) + binary.LittleEndian.PutUint64(manifestDataLE[8*3:], footer.ManifestType) + binary.LittleEndian.PutUint64(manifestDataLE[8*4:], footer.OffsetTarSplit) + binary.LittleEndian.PutUint64(manifestDataLE[8*5:], footer.LengthCompressedTarSplit) + binary.LittleEndian.PutUint64(manifestDataLE[8*6:], footer.LengthUncompressedTarSplit) + copy(manifestDataLE[8*7:], ZstdChunkedFrameMagic) + + return manifestDataLE +} + +// timeIfNotZero returns a pointer to the time.Time if it is not zero, otherwise it returns nil. +func timeIfNotZero(t *time.Time) *time.Time { + if t == nil || t.IsZero() { + return nil + } + return t +} + +// NewFileMetadata creates a basic FileMetadata entry for hdr. +// The caller must set DigestOffset/EndOffset, and the Chunk* values, separately. +func NewFileMetadata(hdr *tar.Header) (FileMetadata, error) { + typ, err := GetType(hdr.Typeflag) + if err != nil { + return FileMetadata{}, err + } + xattrs := make(map[string]string) + for k, v := range hdr.PAXRecords { + xattrKey, ok := strings.CutPrefix(k, archive.PaxSchilyXattr) + if !ok { + continue + } + xattrs[xattrKey] = base64.StdEncoding.EncodeToString([]byte(v)) + } + return FileMetadata{ + Type: typ, + Name: hdr.Name, + Linkname: hdr.Linkname, + Mode: hdr.Mode, + Size: hdr.Size, + UID: hdr.Uid, + GID: hdr.Gid, + ModTime: timeIfNotZero(&hdr.ModTime), + AccessTime: timeIfNotZero(&hdr.AccessTime), + ChangeTime: timeIfNotZero(&hdr.ChangeTime), + Devmajor: hdr.Devmajor, + Devminor: hdr.Devminor, + Xattrs: xattrs, + }, nil +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go b/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go new file mode 100644 index 0000000000..6f39b2ae2e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go @@ -0,0 +1,41 @@ +package toc + +import ( + "errors" + + "github.com/containers/storage/pkg/chunked/internal/minimal" + digest "github.com/opencontainers/go-digest" +) + +// tocJSONDigestAnnotation is the annotation key for the digest of the estargz +// TOC JSON. +// It is defined in github.com/containerd/stargz-snapshotter/estargz as TOCJSONDigestAnnotation +// Duplicate it here to avoid a dependency on the package. +const tocJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest" + +// GetTOCDigest returns the digest of the TOC as recorded in the annotations. +// This function retrieves a digest that represents the content of a +// table of contents (TOC) from the image's annotations. +// This is an experimental feature and may be changed/removed in the future. 
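A hedged usage sketch for GetTOCDigest (defined next): a client can use it to decide whether a layer is even eligible for partial pulls. The helper name supportsPartialPull and the placeholder annotation value are assumptions, not part of the diff.

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/chunked/toc"
)

func main() {
	// Hypothetical annotations as they might appear on an OCI layer descriptor;
	// the digest value is a syntactically valid placeholder (sha256 of "").
	ann := map[string]string{
		"io.github.containers.zstd-chunked.manifest-checksum": "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
	}
	d, err := toc.GetTOCDigest(ann)
	if err != nil {
		fmt.Println("conflicting TOC annotations:", err) // both eStargz and zstd:chunked present
		return
	}
	if d == nil {
		fmt.Println("no TOC advertised: full pull required")
		return
	}
	fmt.Println("partial pull possible, TOC digest:", *d)
}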
+func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) { + d1, ok1 := annotations[tocJSONDigestAnnotation] + d2, ok2 := annotations[minimal.ManifestChecksumKey] + switch { + case ok1 && ok2: + return nil, errors.New("both zstd:chunked and eStargz TOC found") + case ok1: + d, err := digest.Parse(d1) + if err != nil { + return nil, err + } + return &d, nil + case ok2: + d, err := digest.Parse(d2) + if err != nil { + return nil, err + } + return &d, nil + default: + return nil, nil + } +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go b/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go index 785b133174..04cfafcd5c 100644 --- a/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go +++ b/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go @@ -13,7 +13,7 @@ import ( func Exists(path string) error { // It uses unix.Faccessat which is a faster operation compared to os.Stat for // simply checking the existence of a file. - err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, 0) + err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_EACCESS) if err != nil { return &os.PathError{Op: "faccessat", Path: path, Err: err} } @@ -25,7 +25,7 @@ func Exists(path string) error { func Lexists(path string) error { // It uses unix.Faccessat which is a faster operation compared to os.Stat for // simply checking the existence of a file. - err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW) + err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW|unix.AT_EACCESS) if err != nil { return &os.PathError{Op: "faccessat", Path: path, Err: err} } diff --git a/vendor/github.com/containers/storage/pkg/fileutils/reflink_linux.go b/vendor/github.com/containers/storage/pkg/fileutils/reflink_linux.go new file mode 100644 index 0000000000..9f5c6c90bb --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/reflink_linux.go @@ -0,0 +1,20 @@ +package fileutils + +import ( + "io" + "os" + + "golang.org/x/sys/unix" +) + +// ReflinkOrCopy attempts to reflink the source to the destination fd. +// If reflinking fails or is unsupported, it falls back to io.Copy(). +func ReflinkOrCopy(src, dst *os.File) error { + err := unix.IoctlFileClone(int(dst.Fd()), int(src.Fd())) + if err == nil { + return nil + } + + _, err = io.Copy(dst, src) + return err +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/reflink_unsupported.go b/vendor/github.com/containers/storage/pkg/fileutils/reflink_unsupported.go new file mode 100644 index 0000000000..c0a30e670c --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/reflink_unsupported.go @@ -0,0 +1,15 @@ +//go:build !linux + +package fileutils + +import ( + "io" + "os" +) + +// ReflinkOrCopy attempts to reflink the source to the destination fd. +// If reflinking fails or is unsupported, it falls back to io.Copy(). 
+func ReflinkOrCopy(src, dst *os.File) error { + _, err := io.Copy(dst, src) + return err +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go index 299bdbef7f..13277f090e 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go @@ -429,25 +429,25 @@ func parseOverrideXattr(xstat []byte) (Stat, error) { var stat Stat attrs := strings.Split(string(xstat), ":") if len(attrs) < 3 { - return stat, fmt.Errorf("The number of parts in %s is less than 3", + return stat, fmt.Errorf("the number of parts in %s is less than 3", ContainersOverrideXattr) } value, err := strconv.ParseUint(attrs[0], 10, 32) if err != nil { - return stat, fmt.Errorf("Failed to parse UID: %w", err) + return stat, fmt.Errorf("failed to parse UID: %w", err) } stat.IDs.UID = int(value) value, err = strconv.ParseUint(attrs[1], 10, 32) if err != nil { - return stat, fmt.Errorf("Failed to parse GID: %w", err) + return stat, fmt.Errorf("failed to parse GID: %w", err) } stat.IDs.GID = int(value) value, err = strconv.ParseUint(attrs[2], 8, 32) if err != nil { - return stat, fmt.Errorf("Failed to parse mode: %w", err) + return stat, fmt.Errorf("failed to parse mode: %w", err) } stat.Mode = os.FileMode(value) & os.ModePerm if value&0o1000 != 0 { @@ -484,7 +484,7 @@ func parseOverrideXattr(xstat []byte) (Stat, error) { return stat, err } } else { - return stat, fmt.Errorf("Invalid file type %s", typ) + return stat, fmt.Errorf("invalid file type %s", typ) } } return stat, nil @@ -494,18 +494,18 @@ func parseDevice(typ string) (int, int, error) { parts := strings.Split(typ, "-") // If there are more than 3 parts, just ignore them to be forward compatible if len(parts) < 3 { - return 0, 0, fmt.Errorf("Invalid device type %s", typ) + return 0, 0, fmt.Errorf("invalid device type %s", typ) } if parts[0] != "block" && parts[0] != "char" { - return 0, 0, fmt.Errorf("Invalid device type %s", typ) + return 0, 0, fmt.Errorf("invalid device type %s", typ) } major, err := strconv.Atoi(parts[1]) if err != nil { - return 0, 0, fmt.Errorf("Failed to parse major number: %w", err) + return 0, 0, fmt.Errorf("failed to parse major number: %w", err) } minor, err := strconv.Atoi(parts[2]) if err != nil { - return 0, 0, fmt.Errorf("Failed to parse minor number: %w", err) + return 0, 0, fmt.Errorf("failed to parse minor number: %w", err) } return major, minor, nil } diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go index 2bd26d0e34..9a17f57014 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go @@ -5,6 +5,7 @@ package idtools import ( "errors" "os/user" + "sync" "unsafe" ) @@ -13,16 +14,14 @@ import ( #include <shadow/subid.h> #include <stdlib.h> #include <stdio.h> -const char *Prog = "storage"; -FILE *shadow_logfd = NULL; struct subid_range get_range(struct subid_range *ranges, int i) { - shadow_logfd = stderr; - return ranges[i]; + return ranges[i]; } #if !defined(SUBID_ABI_MAJOR) || (SUBID_ABI_MAJOR < 4) +# define subid_init libsubid_init # define subid_get_uid_ranges get_subuid_ranges # define subid_get_gid_ranges get_subgid_ranges #endif @@ -30,6 +29,8 @@ struct subid_range get_range(struct subid_range *ranges, int i) */ import "C" +var onceInit sync.Once + func readSubid(username string, isUser bool) (ranges, error) {
var ret ranges uidstr := "" @@ -42,6 +43,10 @@ func readSubid(username string, isUser bool) (ranges, error) { uidstr = u.Uid } + onceInit.Do(func() { + C.subid_init(C.CString("storage"), C.stderr) + }) + cUsername := C.CString(username) defer C.free(unsafe.Pointer(cUsername)) diff --git a/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go b/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go index 72a04f3491..cf60580359 100644 --- a/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go +++ b/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go @@ -93,10 +93,7 @@ loop0: } // add new byte slice to the buffers slice and continue writing - nextCap := b.Cap() * 2 - if nextCap > maxCap { - nextCap = maxCap - } + nextCap := min(b.Cap()*2, maxCap) bp.buf = append(bp.buf, getBuffer(nextCap)) } bp.wait.Broadcast() @@ -178,7 +175,7 @@ func getBuffer(size int) *fixedBuffer { bufPoolsLock.Lock() pool, ok := bufPools[size] if !ok { - pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} + pool = &sync.Pool{New: func() any { return &fixedBuffer{buf: make([]byte, 0, size)} }} bufPools[size] = pool } bufPoolsLock.Unlock() diff --git a/vendor/github.com/containers/storage/pkg/pools/pools.go b/vendor/github.com/containers/storage/pkg/pools/pools.go new file mode 100644 index 0000000000..e68648d1d6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/pools/pools.go @@ -0,0 +1,119 @@ +// Package pools provides a collection of pools which provide various +// data types with buffers. These can be used to lower the number of +// memory allocations and reuse buffers. +// +// New pools should be added to this package to allow them to be +// shared across packages. +// +// Utility functions which operate on pools should be added to this +// package to allow them to be reused. +package pools + +import ( + "bufio" + "io" + "sync" + + "github.com/containers/storage/pkg/ioutils" +) + +var ( + // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. + BufioReader32KPool *BufioReaderPool + // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. + BufioWriter32KPool *BufioWriterPool +) + +const buffer32K = 32 * 1024 + +// BufioReaderPool is a bufio reader that uses sync.Pool. +type BufioReaderPool struct { + pool *sync.Pool +} + +func init() { + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) +} + +// newBufioReaderPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioReaderPoolWithSize(size int) *BufioReaderPool { + pool := &sync.Pool{ + New: func() any { return bufio.NewReaderSize(nil, size) }, + } + return &BufioReaderPool{pool: pool} +} + +// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. +func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { + buf := bufPool.pool.Get().(*bufio.Reader) + buf.Reset(r) + return buf +} + +// Put puts the bufio.Reader back into the pool. +func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. 
+func Copy(dst io.Writer, src io.Reader) (written int64, err error) { + buf := BufioReader32KPool.Get(src) + written, err = io.Copy(dst, buf) + BufioReader32KPool.Put(buf) + return +} + +// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back +// into the pool and closes the reader if it's an io.ReadCloser. +func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { + return ioutils.NewReadCloserWrapper(r, func() error { + if readCloser, ok := r.(io.ReadCloser); ok { + readCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} + +// BufioWriterPool is a bufio writer that uses sync.Pool. +type BufioWriterPool struct { + pool *sync.Pool +} + +// newBufioWriterPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioWriterPoolWithSize(size int) *BufioWriterPool { + pool := &sync.Pool{ + New: func() any { return bufio.NewWriterSize(nil, size) }, + } + return &BufioWriterPool{pool: pool} +} + +// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. +func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { + buf := bufPool.pool.Get().(*bufio.Writer) + buf.Reset(w) + return buf +} + +// Put puts the bufio.Writer back into the pool. +func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back +// into the pool and closes the writer if it's an io.WriteCloser. +func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { + return ioutils.NewWriteCloserWrapper(w, func() error { + buf.Flush() + if writeCloser, ok := w.(io.WriteCloser); ok { + writeCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} diff --git a/vendor/github.com/containers/storage/pkg/promise/promise.go b/vendor/github.com/containers/storage/pkg/promise/promise.go new file mode 100644 index 0000000000..dd52b9082f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/promise/promise.go @@ -0,0 +1,11 @@ +package promise + +// Go is a basic promise implementation: it wraps a call to a function in a goroutine, +// and returns a channel which will later return the function's return value.
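Because the channel returned by promise.Go is buffered with capacity 1, the spawned goroutine can always deliver its result and exit even if the caller never reads it. A minimal usage sketch, assuming the vendored import path (not part of this diff):

package main

import (
	"errors"
	"fmt"

	"github.com/containers/storage/pkg/promise"
)

func main() {
	// Kick off background work; the error (or nil) arrives on the channel.
	errCh := promise.Go(func() error {
		return errors.New("work failed") // stand-in for real background work
	})
	// Do other work here, then collect the result.
	if err := <-errCh; err != nil {
		fmt.Println(err)
	}
}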
+func Go(f func() error) chan error { + ch := make(chan error, 1) + go func() { + ch <- f() + }() + return ch +} diff --git a/vendor/github.com/containers/storage/pkg/reexec/reexec.go b/vendor/github.com/containers/storage/pkg/reexec/reexec.go index 0c032e6c47..a1938cd4f3 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/reexec.go +++ b/vendor/github.com/containers/storage/pkg/reexec/reexec.go @@ -49,7 +49,7 @@ func panicIfNotInitialized() { } } -func naiveSelf() string { //nolint: unused +func naiveSelf() string { name := os.Args[0] if filepath.Base(name) == name { if lp, err := exec.LookPath(name); err == nil { diff --git a/vendor/github.com/containers/storage/pkg/system/stat_linux.go b/vendor/github.com/containers/storage/pkg/system/stat_linux.go index e3d13463f6..0dee88d1b8 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_linux.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_linux.go @@ -9,9 +9,9 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) { mode: s.Mode, uid: s.Uid, gid: s.Gid, - rdev: uint64(s.Rdev), + rdev: uint64(s.Rdev), //nolint:unconvert mtim: s.Mtim, - dev: uint64(s.Dev), + dev: uint64(s.Dev), //nolint:unconvert }, nil } diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go index b45a6819a2..9e0e562d20 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go @@ -32,9 +32,9 @@ type Cmd struct { *exec.Cmd UnshareFlags int UseNewuidmap bool - UidMappings []specs.LinuxIDMapping // nolint: revive,golint + UidMappings []specs.LinuxIDMapping //nolint: revive UseNewgidmap bool - GidMappings []specs.LinuxIDMapping // nolint: revive,golint + GidMappings []specs.LinuxIDMapping //nolint: revive GidMappingsEnableSetgroups bool Setsid bool Setpgrp bool @@ -98,7 +98,7 @@ func IsSetID(path string, modeid os.FileMode, capid capability.Cap) (bool, error return cap.Get(capability.EFFECTIVE, capid), nil } -func (c *Cmd) Start() error { +func (c *Cmd) Start() (retErr error) { runtime.LockOSThread() defer runtime.UnlockOSThread() @@ -167,6 +167,15 @@ func (c *Cmd) Start() error { return err } + // If the function fails from here, we need to make sure the + // child process is killed and properly cleaned up. + defer func() { + if retErr != nil { + _ = c.Cmd.Process.Kill() + _ = c.Cmd.Wait() + } + }() + // Close the ends of the pipes that the parent doesn't need. continueRead.Close() continueRead = nil @@ -240,7 +249,7 @@ func (c *Cmd) Start() error { if err != nil { return fmt.Errorf("finding newgidmap: %w", err) } - cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...) + cmd := exec.Command(path, append([]string{pidString}, strings.Fields(g.String())...)...) g.Reset() cmd.Stdout = g cmd.Stderr = g @@ -258,7 +267,7 @@ func (c *Cmd) Start() error { } logrus.Warnf("Falling back to single mapping") g.Reset() - g.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Getegid()))) + fmt.Fprintf(g, "0 %d 1\n", os.Getegid()) } } if !gidmapSet { @@ -300,7 +309,7 @@ func (c *Cmd) Start() error { if err != nil { return fmt.Errorf("finding newuidmap: %w", err) } - cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...) + cmd := exec.Command(path, append([]string{pidString}, strings.Fields(u.String())...)...) 
u.Reset() cmd.Stdout = u cmd.Stderr = u @@ -319,7 +328,7 @@ func (c *Cmd) Start() error { logrus.Warnf("Falling back to single mapping") u.Reset() - u.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Geteuid()))) + fmt.Fprintf(u, "0 %d 1\n", os.Geteuid()) } } if !uidmapSet { @@ -459,7 +468,7 @@ type Runnable interface { Run() error } -func bailOnError(err error, format string, a ...interface{}) { // nolint: revive,goprintffuncname +func bailOnError(err error, format string, a ...any) { //nolint:revive,goprintffuncname if err != nil { if format != "" { logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err) diff --git a/vendor/github.com/cyberphone/json-canonicalization/LICENSE b/vendor/github.com/cyberphone/json-canonicalization/LICENSE new file mode 100644 index 0000000000..591211595a --- /dev/null +++ b/vendor/github.com/cyberphone/json-canonicalization/LICENSE @@ -0,0 +1,13 @@ + Copyright 2018 Anders Rundgren + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/es6numfmt.go b/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/es6numfmt.go new file mode 100644 index 0000000000..92574a3f4f --- /dev/null +++ b/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/es6numfmt.go @@ -0,0 +1,71 @@ +// +// Copyright 2006-2019 WebPKI.org (http://webpki.org). +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// This package converts numbers in IEEE-754 double precision into the +// format specified for JSON in EcmaScript Version 6 and forward. 
+// The core application for this is canonicalization: +// https://tools.ietf.org/html/draft-rundgren-json-canonicalization-scheme-02 + +package jsoncanonicalizer + +import ( + "errors" + "math" + "strconv" + "strings" +) + +const invalidPattern uint64 = 0x7ff0000000000000 + +func NumberToJSON(ieeeF64 float64) (res string, err error) { + ieeeU64 := math.Float64bits(ieeeF64) + + // Special case: NaN and Infinity are invalid in JSON + if (ieeeU64 & invalidPattern) == invalidPattern { + return "null", errors.New("Invalid JSON number: " + strconv.FormatUint(ieeeU64, 16)) + } + + // Special case: eliminate "-0" as mandated by the ES6-JSON/JCS specifications + if ieeeF64 == 0 { // Right, this line takes both -0 and 0 + return "0", nil + } + + // Deal with the sign separately + var sign string = "" + if ieeeF64 < 0 { + ieeeF64 =-ieeeF64 + sign = "-" + } + + // ES6 has a unique "g" format + var format byte = 'e' + if ieeeF64 < 1e+21 && ieeeF64 >= 1e-6 { + format = 'f' + } + + // The following should do the trick: + es6Formatted := strconv.FormatFloat(ieeeF64, format, -1, 64) + + // Minor cleanup + exponent := strings.IndexByte(es6Formatted, 'e') + if exponent > 0 { + // Go outputs "1e+09" which must be rewritten as "1e+9" + if es6Formatted[exponent + 2] == '0' { + es6Formatted = es6Formatted[:exponent + 2] + es6Formatted[exponent + 3:] + } + } + return sign + es6Formatted, nil +} diff --git a/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/jsoncanonicalizer.go b/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/jsoncanonicalizer.go new file mode 100644 index 0000000000..661f41055e --- /dev/null +++ b/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/jsoncanonicalizer.go @@ -0,0 +1,378 @@ +// +// Copyright 2006-2019 WebPKI.org (http://webpki.org). +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
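A small demonstration of the NumberToJSON formatting rules above: values in [1e-6, 1e21) use fixed notation, everything else uses exponent notation with the leading exponent zero stripped. This is an illustration only; the import path mirrors the vendored directory layout and is an assumption.

package main

import (
	"fmt"

	"github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer"
)

func main() {
	for _, v := range []float64{1, 0.000001, 1e-7, 1e21} {
		s, err := jsoncanonicalizer.NumberToJSON(v)
		if err != nil {
			panic(err)
		}
		fmt.Println(s) // prints: 1, 0.000001, 1e-7, 1e+21
	}
}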
+// + +// This package transforms JSON data in UTF-8 according to: +// https://tools.ietf.org/html/draft-rundgren-json-canonicalization-scheme-02 + +package jsoncanonicalizer + +import ( + "errors" + "container/list" + "fmt" + "strconv" + "strings" + "unicode/utf16" +) + +type nameValueType struct { + name string + sortKey []uint16 + value string +} + +// JSON standard escapes (modulo \u) +var asciiEscapes = []byte{'\\', '"', 'b', 'f', 'n', 'r', 't'} +var binaryEscapes = []byte{'\\', '"', '\b', '\f', '\n', '\r', '\t'} + +// JSON literals +var literals = []string{"true", "false", "null"} + +func Transform(jsonData []byte) (result []byte, e error) { + + // JSON data MUST be UTF-8 encoded + var jsonDataLength int = len(jsonData) + + // Current pointer in jsonData + var index int = 0 + + // "Forward" declarations are needed for closures referring each other + var parseElement func() string + var parseSimpleType func() string + var parseQuotedString func() string + var parseObject func() string + var parseArray func() string + + var globalError error = nil + + checkError := func(e error) { + // We only honor the first reported error + if globalError == nil { + globalError = e + } + } + + setError := func(msg string) { + checkError(errors.New(msg)) + } + + isWhiteSpace := func(c byte) bool { + return c == 0x20 || c == 0x0a || c == 0x0d || c == 0x09 + } + + nextChar := func() byte { + if index < jsonDataLength { + c := jsonData[index] + if c > 0x7f { + setError("Unexpected non-ASCII character") + } + index++ + return c + } + setError("Unexpected EOF reached") + return '"' + } + + scan := func() byte { + for { + c := nextChar() + if isWhiteSpace(c) { + continue; + } + return c + } + } + + scanFor := func(expected byte) { + c := scan() + if c != expected { + setError("Expected '" + string(expected) + "' but got '" + string(c) + "'") + } + } + + getUEscape := func() rune { + start := index + nextChar() + nextChar() + nextChar() + nextChar() + if globalError != nil { + return 0 + } + u16, err := strconv.ParseUint(string(jsonData[start:index]), 16, 64) + checkError(err) + return rune(u16) + } + + testNextNonWhiteSpaceChar := func() byte { + save := index + c := scan() + index = save + return c + } + + decorateString := func(rawUTF8 string) string { + var quotedString strings.Builder + quotedString.WriteByte('"') + CoreLoop: + for _, c := range []byte(rawUTF8) { + // Is this within the JSON standard escapes? 
+ for i, esc := range binaryEscapes { + if esc == c { + quotedString.WriteByte('\\') + quotedString.WriteByte(asciiEscapes[i]) + continue CoreLoop + } + } + if c < 0x20 { + // Other ASCII control characters must be escaped with \uhhhh + quotedString.WriteString(fmt.Sprintf("\\u%04x", c)) + } else { + quotedString.WriteByte(c) + } + } + quotedString.WriteByte('"') + return quotedString.String() + } + + parseQuotedString = func() string { + var rawString strings.Builder + CoreLoop: + for globalError == nil { + var c byte + if index < jsonDataLength { + c = jsonData[index] + index++ + } else { + nextChar() + break + } + if (c == '"') { + break; + } + if c < ' ' { + setError("Unterminated string literal") + } else if c == '\\' { + // Escape sequence + c = nextChar() + if c == 'u' { + // The \u escape + firstUTF16 := getUEscape() + if utf16.IsSurrogate(firstUTF16) { + // If the first UTF-16 code unit has a certain value there must be + // another succeeding UTF-16 code unit as well + if nextChar() != '\\' || nextChar() != 'u' { + setError("Missing surrogate") + } else { + // Output the UTF-32 code point as UTF-8 + rawString.WriteRune(utf16.DecodeRune(firstUTF16, getUEscape())) + } + } else { + // Single UTF-16 code identical to UTF-32. Output as UTF-8 + rawString.WriteRune(firstUTF16) + } + } else if c == '/' { + // Benign but useless escape + rawString.WriteByte('/') + } else { + // The JSON standard escapes + for i, esc := range asciiEscapes { + if esc == c { + rawString.WriteByte(binaryEscapes[i]) + continue CoreLoop + } + } + setError("Unexpected escape: \\" + string(c)) + } + } else { + // Just an ordinary ASCII character alternatively a UTF-8 byte + // outside of ASCII. + // Note that properly formatted UTF-8 never clashes with ASCII + // making byte per byte search for ASCII break characters work + // as expected. + rawString.WriteByte(c) + } + } + return rawString.String() + } + + parseSimpleType = func() string { + var token strings.Builder + index-- + for globalError == nil { + c := testNextNonWhiteSpaceChar() + if c == ',' || c == ']' || c == '}' { + break; + } + c = nextChar() + if isWhiteSpace(c) { + break + } + token.WriteByte(c) + } + if token.Len() == 0 { + setError("Missing argument") + } + value := token.String() + // Is it a JSON literal? 
+ for _, literal := range literals { + if literal == value { + return literal + } + } + // Apparently not so we assume that it is a I-JSON number + ieeeF64, err := strconv.ParseFloat(value, 64) + checkError(err) + value, err = NumberToJSON(ieeeF64) + checkError(err) + return value + } + + parseElement = func() string { + switch scan() { + case '{': + return parseObject() + case '"': + return decorateString(parseQuotedString()) + case '[': + return parseArray() + default: + return parseSimpleType() + } + } + + parseArray = func() string { + var arrayData strings.Builder + arrayData.WriteByte('[') + var next bool = false + for globalError == nil && testNextNonWhiteSpaceChar() != ']' { + if next { + scanFor(',') + arrayData.WriteByte(',') + } else { + next = true + } + arrayData.WriteString(parseElement()) + } + scan() + arrayData.WriteByte(']') + return arrayData.String() + } + + lexicographicallyPrecedes := func(sortKey []uint16, e *list.Element) bool { + // Find the minimum length of the sortKeys + oldSortKey := e.Value.(nameValueType).sortKey + minLength := len(oldSortKey) + if minLength > len(sortKey) { + minLength = len(sortKey) + } + for q := 0; q < minLength; q++ { + diff := int(sortKey[q]) - int(oldSortKey[q]) + if diff < 0 { + // Smaller => Precedes + return true + } else if diff > 0 { + // Bigger => No match + return false + } + // Still equal => Continue + } + // The sortKeys compared equal up to minLength + if len(sortKey) < len(oldSortKey) { + // Shorter => Precedes + return true + } + if len(sortKey) == len(oldSortKey) { + setError("Duplicate key: " + e.Value.(nameValueType).name) + } + // Longer => No match + return false + } + + parseObject = func() string { + nameValueList := list.New() + var next bool = false + CoreLoop: + for globalError == nil && testNextNonWhiteSpaceChar() != '}' { + if next { + scanFor(',') + } + next = true + scanFor('"') + rawUTF8 := parseQuotedString() + if globalError != nil { + break; + } + // Sort keys on UTF-16 code units + // Since UTF-8 doesn't have endianess this is just a value transformation + // In the Go case the transformation is UTF-8 => UTF-32 => UTF-16 + sortKey := utf16.Encode([]rune(rawUTF8)) + scanFor(':') + nameValue := nameValueType{rawUTF8, sortKey, parseElement()} + for e := nameValueList.Front(); e != nil; e = e.Next() { + // Check if the key is smaller than a previous key + if lexicographicallyPrecedes(sortKey, e) { + // Precedes => Insert before and exit sorting + nameValueList.InsertBefore(nameValue, e) + continue CoreLoop + } + // Continue searching for a possibly succeeding sortKey + // (which is straightforward since the list is ordered) + } + // The sortKey is either the first or is succeeding all previous sortKeys + nameValueList.PushBack(nameValue) + } + // Scan away '}' + scan() + // Now everything is sorted so we can properly serialize the object + var objectData strings.Builder + objectData.WriteByte('{') + next = false + for e := nameValueList.Front(); e != nil; e = e.Next() { + if next { + objectData.WriteByte(',') + } + next = true + nameValue := e.Value.(nameValueType) + objectData.WriteString(decorateString(nameValue.name)) + objectData.WriteByte(':') + objectData.WriteString(nameValue.value) + } + objectData.WriteByte('}') + return objectData.String() + } + + ///////////////////////////////////////////////// + // This is where Transform actually begins... 
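Putting the pieces together, a hedged end-to-end sketch of Transform (again assuming the vendored import path): member names are sorted by their UTF-16 code units, insignificant whitespace is dropped, and numbers are rewritten through NumberToJSON. Note that non-ASCII is accepted inside string literals, as parseQuotedString consumes raw UTF-8 bytes.

package main

import (
	"fmt"

	"github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer"
)

func main() {
	out, err := jsoncanonicalizer.Transform([]byte(`{"b": 2, "a": [true, 1e+09], "ä": null}`))
	if err != nil {
		panic(err)
	}
	// "ä" (U+00E4) sorts after "a" and "b" in UTF-16 order.
	fmt.Println(string(out)) // {"a":[true,1000000000],"b":2,"ä":null}
}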
// + ///////////////////////////////////////////////// + var transformed string + + if testNextNonWhiteSpaceChar() == '[' { + scan() + transformed = parseArray() + } else { + scanFor('{') + transformed = parseObject() + } + for index < jsonDataLength { + if !isWhiteSpace(jsonData[index]) { + setError("Improperly terminated JSON object") + break; + } + index++ + } + return []byte(transformed), globalError +} \ No newline at end of file diff --git a/vendor/github.com/docker/cli/cli/config/config.go b/vendor/github.com/docker/cli/cli/config/config.go index 910b3c0064..cbb34486a6 100644 --- a/vendor/github.com/docker/cli/cli/config/config.go +++ b/vendor/github.com/docker/cli/cli/config/config.go @@ -58,7 +58,7 @@ func resetConfigDir() { // getHomeDir is a copy of [pkg/homedir.Get] to prevent adding docker/docker // as dependency for consumers that only need to read the config-file. // -// [pkg/homedir.Get]: https://pkg.go.dev/github.com/docker/docker@v26.1.4+incompatible/pkg/homedir#Get +// [pkg/homedir.Get]: https://pkg.go.dev/github.com/docker/docker@v28.0.3+incompatible/pkg/homedir#Get func getHomeDir() string { home, _ := os.UserHomeDir() if home == "" && runtime.GOOS != "windows" { @@ -69,6 +69,11 @@ func getHomeDir() string { return home } +// Provider defines an interface for providing the CLI config. +type Provider interface { + ConfigFile() *configfile.ConfigFile +} + // Dir returns the directory the configuration file is stored in func Dir() string { initConfigDir.Do(func() { diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file.go b/vendor/github.com/docker/cli/cli/config/configfile/file.go index ae9dcb3370..b9f57b72a0 100644 --- a/vendor/github.com/docker/cli/cli/config/configfile/file.go +++ b/vendor/github.com/docker/cli/cli/config/configfile/file.go @@ -36,12 +36,14 @@ type ConfigFile struct { NodesFormat string `json:"nodesFormat,omitempty"` PruneFilters []string `json:"pruneFilters,omitempty"` Proxies map[string]ProxyConfig `json:"proxies,omitempty"` - Experimental string `json:"experimental,omitempty"` CurrentContext string `json:"currentContext,omitempty"` CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"` Plugins map[string]map[string]string `json:"plugins,omitempty"` Aliases map[string]string `json:"aliases,omitempty"` Features map[string]string `json:"features,omitempty"` + + // Deprecated: experimental CLI features are always enabled and this field is no longer used. Use [Features] instead for optional features. This field will be removed in a future release. + Experimental string `json:"experimental,omitempty"` } // ProxyConfig contains proxy configuration settings diff --git a/vendor/github.com/docker/docker-credential-helpers/client/command.go b/vendor/github.com/docker/docker-credential-helpers/client/command.go index 1936234bef..93863480ba 100644 --- a/vendor/github.com/docker/docker-credential-helpers/client/command.go +++ b/vendor/github.com/docker/docker-credential-helpers/client/command.go @@ -15,27 +15,30 @@ type Program interface { // ProgramFunc is a type of function that initializes programs based on arguments. type ProgramFunc func(args ...string) Program -// NewShellProgramFunc creates programs that are executed in a Shell. -func NewShellProgramFunc(name string) ProgramFunc { - return NewShellProgramFuncWithEnv(name, nil) +// NewShellProgramFunc creates a [ProgramFunc] to run command in a [Shell]. 
+func NewShellProgramFunc(command string) ProgramFunc { + return func(args ...string) Program { + return createProgramCmdRedirectErr(command, args, nil) + } } -// NewShellProgramFuncWithEnv creates programs that are executed in a Shell with environment variables -func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc { +// NewShellProgramFuncWithEnv creates a [ProgramFunc] tu run command +// in a [Shell] with the given environment variables. +func NewShellProgramFuncWithEnv(command string, env *map[string]string) ProgramFunc { return func(args ...string) Program { - return &Shell{cmd: createProgramCmdRedirectErr(name, args, env)} + return createProgramCmdRedirectErr(command, args, env) } } -func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd { - programCmd := exec.Command(commandName, args...) +func createProgramCmdRedirectErr(command string, args []string, env *map[string]string) *Shell { + ec := exec.Command(command, args...) if env != nil { for k, v := range *env { - programCmd.Env = append(programCmd.Environ(), k+"="+v) + ec.Env = append(ec.Environ(), k+"="+v) } } - programCmd.Stderr = os.Stderr - return programCmd + ec.Stderr = os.Stderr + return &Shell{cmd: ec} } // Shell invokes shell commands to talk with a remote credentials-helper. diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS index 5f93eeb4e8..88032defe7 100644 --- a/vendor/github.com/docker/docker/AUTHORS +++ b/vendor/github.com/docker/docker/AUTHORS @@ -2,7 +2,9 @@ # This file lists all contributors to the repository. # See hack/generate-authors.sh to make modifications. +7sunarni <710720732@qq.com> Aanand Prasad +Aarni Koskela Aaron Davidson Aaron Feng Aaron Hnatiw @@ -11,6 +13,7 @@ Aaron L. Xu Aaron Lehmann Aaron Welch Aaron Yoshitake +Abdur Rehman Abel Muiño Abhijeet Kasurde Abhinandan Prativadi @@ -24,9 +27,11 @@ Adam Avilla Adam Dobrawy Adam Eijdenberg Adam Kunk +Adam Lamers Adam Miller Adam Mills Adam Pointer +Adam Simon Adam Singer Adam Thornton Adam Walz @@ -119,6 +124,7 @@ amangoel Amen Belayneh Ameya Gawde Amir Goldstein +AmirBuddy Amit Bakshi Amit Krishnan Amit Shukla @@ -168,6 +174,7 @@ Andrey Kolomentsev Andrey Petrov Andrey Stolbovsky André Martins +Andrés Maldonado Andy Chambers andy diller Andy Goldstein @@ -219,6 +226,7 @@ Artur Meyster Arun Gupta Asad Saeeduddin Asbjørn Enge +Ashly Mathew Austin Vazquez averagehuman Avi Das @@ -345,6 +353,7 @@ Chance Zibolski Chander Govindarajan Chanhun Jeong Chao Wang +Charity Kathure Charles Chan Charles Hooper Charles Law @@ -480,6 +489,7 @@ Daniel Farrell Daniel Garcia Daniel Gasienica Daniel Grunwell +Daniel Guns Daniel Helfand Daniel Hiltgen Daniel J Walsh @@ -763,6 +773,7 @@ Frank Macreery Frank Rosquin Frank Villaro-Dixon Frank Yang +François Scala Fred Lifton Frederick F. Kautz IV Frederico F. 
de Oliveira @@ -798,6 +809,7 @@ GennadySpb Geoff Levand Geoffrey Bachelet Geon Kim +George Adams George Kontridze George Ma George MacRorie @@ -826,6 +838,7 @@ Gopikannan Venugopalsamy Gosuke Miyashita Gou Rao Govinda Fichtner +Grace Choi Grant Millar Grant Reaber Graydon Hoare @@ -966,6 +979,7 @@ James Nugent James Sanders James Turnbull James Watkins-Harvey +Jameson Hyde Jamie Hannaford Jamshid Afshar Jan Breig @@ -1064,13 +1078,16 @@ Jim Perrin Jimmy Cuadra Jimmy Puckett Jimmy Song +jinjiadu Jinsoo Park Jintao Zhang Jiri Appl Jiri Popelka Jiuyue Ma Jiří Župka +jjimbo137 <115816493+jjimbo137@users.noreply.github.com> Joakim Roubert +Joan Grau Joao Fernandes Joao Trindade Joe Beda @@ -1155,6 +1172,7 @@ Josiah Kiehl José Tomás Albornoz Joyce Jang JP +JSchltggr Julian Taylor Julien Barbier Julien Bisconti @@ -1289,6 +1307,7 @@ Laura Brehm Laura Frank Laurent Bernaille Laurent Erignoux +Laurent Goderre Laurie Voss Leandro Motta Barros Leandro Siqueira @@ -1369,6 +1388,7 @@ Madhan Raj Mookkandy Madhav Puri Madhu Venugopal Mageee +maggie44 <64841595+maggie44@users.noreply.github.com> Mahesh Tiyyagura malnick Malte Janduda @@ -1579,6 +1599,7 @@ Muayyad Alsadi Muhammad Zohaib Aslam Mustafa Akın Muthukumar R +Myeongjoon Kim Máximo Cuadros Médi-Rémi Hashim Nace Oroz @@ -1593,6 +1614,7 @@ Natasha Jarus Nate Brennand Nate Eagleson Nate Jones +Nathan Baulch Nathan Carlson Nathan Herald Nathan Hsieh @@ -1655,6 +1677,7 @@ Nuutti Kotivuori nzwsch O.S. Tezer objectified +Octol1ttle Odin Ugedal Oguz Bilgic Oh Jinkyun @@ -1763,6 +1786,7 @@ Pierre Carrier Pierre Dal-Pra Pierre Wacrenier Pierre-Alain RIVIERE +pinglanlu Piotr Bogdan Piotr Karbowski Porjo @@ -1790,6 +1814,7 @@ Quentin Tayssier r0n22 Rachit Sharma Radostin Stoyanov +Rafael Fernández López Rafal Jeczalik Rafe Colton Raghavendra K T @@ -1856,7 +1881,7 @@ Robin Speekenbrink Robin Thoni robpc Rodolfo Carvalho -Rodrigo Campos +Rodrigo Campos Rodrigo Vaz Roel Van Nyen Roger Peppe @@ -1995,6 +2020,7 @@ Sevki Hasirci Shane Canon Shane da Silva Shaun Kaasten +Shaun Thompson shaunol Shawn Landden Shawn Siefkas @@ -2013,6 +2039,7 @@ Shijun Qin Shishir Mahajan Shoubhik Bose Shourya Sarcar +Shreenidhi Shedi Shu-Wai Chow shuai-z Shukui Yang @@ -2100,6 +2127,7 @@ Sébastien Stormacq Sören Tempel Tabakhase Tadej Janež +Tadeusz Dudkiewicz Takuto Sato tang0th Tangi Colin @@ -2107,6 +2135,7 @@ Tatsuki Sugiura Tatsushi Inagaki Taylan Isikdemir Taylor Jones +tcpdumppy <847462026@qq.com> Ted M. 
Young Tehmasp Chaudhri Tejaswini Duggaraju @@ -2391,6 +2420,7 @@ You-Sheng Yang (楊有勝) youcai Youcef YEKHLEF Youfu Zhang +YR Chen Yu Changchun Yu Chengxia Yu Peng diff --git a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml index f4e7dbf37b..7f257e99ac 100644 --- a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml +++ b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml @@ -1,7 +1,7 @@ freebsd_task: name: 'FreeBSD' freebsd_instance: - image_family: freebsd-14-1 + image_family: freebsd-14-2 install_script: - pkg update -f - pkg install -y go diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index fa854785d0..6468d2cf40 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,6 +1,39 @@ # Changelog -1.8.0 2023-10-31 +1.9.0 2024-04-04 +---------------- + +### Changes and fixes + +- all: make BufferedWatcher buffered again ([#657]) + +- inotify: fix race when adding/removing watches while a watched path is being + deleted ([#678], [#686]) + +- inotify: don't send empty event if a watched path is unmounted ([#655]) + +- inotify: don't register duplicate watches when watching both a symlink and its + target; previously that would get "half-added" and removing the second would + panic ([#679]) + +- kqueue: fix watching relative symlinks ([#681]) + +- kqueue: correctly mark pre-existing entries when watching a link to a dir on + kqueue ([#682]) + +- illumos: don't send error if changed file is deleted while processing the + event ([#678]) + + +[#657]: https://github.com/fsnotify/fsnotify/pull/657 +[#678]: https://github.com/fsnotify/fsnotify/pull/678 +[#686]: https://github.com/fsnotify/fsnotify/pull/686 +[#655]: https://github.com/fsnotify/fsnotify/pull/655 +[#681]: https://github.com/fsnotify/fsnotify/pull/681 +[#679]: https://github.com/fsnotify/fsnotify/pull/679 +[#682]: https://github.com/fsnotify/fsnotify/pull/682 + +1.8.0 2024-10-31 ---------------- ### Additions diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index e4ac2a2fff..4cc40fa597 100644 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -77,6 +77,7 @@ End-of-line escapes with `\` are not supported. debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in parallel by default, so -parallel=1 is probably a good idea). + print [any strings] # Print text to stdout; for debugging. touch path mkdir [-p] dir diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md index e480733d16..1f4eb583d5 100644 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -15,7 +15,6 @@ Platform support: | ReadDirectoryChangesW | Windows | Supported | | FEN | illumos | Supported | | fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) | -| AHAFS | AIX | [aix branch]; experimental due to lack of maintainer and test environment | | FSEvents | macOS | [Needs support in x/sys/unix][fsevents] | | USN Journals | Windows | [Needs support in x/sys/windows][usn] | | Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) | @@ -25,7 +24,6 @@ untested. 
[fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120 [usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847 -[aix branch]: https://github.com/fsnotify/fsnotify/issues/353#issuecomment-1284590129 Usage ----- diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go index c349c326c7..57fc692848 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_fen.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -9,6 +9,7 @@ package fsnotify import ( "errors" "fmt" + "io/fs" "os" "path/filepath" "sync" @@ -19,27 +20,25 @@ import ( ) type fen struct { + *shared Events chan Event Errors chan error mu sync.Mutex port *unix.EventPort - done chan struct{} // Channel for sending a "quit message" to the reader goroutine dirs map[string]Op // Explicitly watched directories watches map[string]Op // Explicitly watched non-directories } -func newBackend(ev chan Event, errs chan error) (backend, error) { - return newBufferedBackend(0, ev, errs) -} +var defaultBufferSize = 0 -func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { w := &fen{ + shared: newShared(ev, errs), Events: ev, Errors: errs, dirs: make(map[string]Op), watches: make(map[string]Op), - done: make(chan struct{}), } var err error @@ -52,49 +51,10 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error return w, nil } -// sendEvent attempts to send an event to the user, returning true if the event -// was put in the channel successfully and false if the watcher has been closed. -func (w *fen) sendEvent(name string, op Op) (sent bool) { - select { - case <-w.done: - return false - case w.Events <- Event{Name: name, Op: op}: - return true - } -} - -// sendError attempts to send an error to the user, returning true if the error -// was put in the channel successfully and false if the watcher has been closed. -func (w *fen) sendError(err error) (sent bool) { - if err == nil { - return true - } - select { - case <-w.done: - return false - case w.Errors <- err: - return true - } -} - -func (w *fen) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - func (w *fen) Close() error { - // Take the lock used by associateFile to prevent lingering events from - // being processed after the close - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed() { + if w.shared.close() { return nil } - close(w.done) return w.port.Close() } @@ -209,7 +169,7 @@ func (w *fen) readEvents() { return } // There was an error not caused by calling w.Close() - if !w.sendError(err) { + if !w.sendError(fmt.Errorf("port.Get: %w", err)) { return } } @@ -277,13 +237,13 @@ func (w *fen) handleEvent(event *unix.PortEvent) error { isWatched := watchedDir || watchedPath if events&unix.FILE_DELETE != 0 { - if !w.sendEvent(path, Remove) { + if !w.sendEvent(Event{Name: path, Op: Remove}) { return nil } reRegister = false } if events&unix.FILE_RENAME_FROM != 0 { - if !w.sendEvent(path, Rename) { + if !w.sendEvent(Event{Name: path, Op: Rename}) { return nil } // Don't keep watching the new file name @@ -297,7 +257,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error { // inotify reports a Remove event in this case, so we simulate this // here. 
- if !w.sendEvent(path, Remove) { + if !w.sendEvent(Event{Name: path, Op: Remove}) { return nil } // Don't keep watching the file that was removed @@ -331,7 +291,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error { // get here, the sudirectory is already gone. Clearly we were watching // this path but now it is gone. Let's tell the user that it was // removed. - if !w.sendEvent(path, Remove) { + if !w.sendEvent(Event{Name: path, Op: Remove}) { return nil } // Suppress extra write events on removed directories; they are not @@ -346,7 +306,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error { if err != nil { // The symlink still exists, but the target is gone. Report the // Remove similar to above. - if !w.sendEvent(path, Remove) { + if !w.sendEvent(Event{Name: path, Op: Remove}) { return nil } // Don't return the error @@ -359,7 +319,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error { return err } } else { - if !w.sendEvent(path, Write) { + if !w.sendEvent(Event{Name: path, Op: Write}) { return nil } } @@ -367,7 +327,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error { if events&unix.FILE_ATTRIB != 0 && stat != nil { // Only send Chmod if perms changed if stat.Mode().Perm() != fmode.Perm() { - if !w.sendEvent(path, Chmod) { + if !w.sendEvent(Event{Name: path, Op: Chmod}) { return nil } } @@ -376,17 +336,27 @@ func (w *fen) handleEvent(event *unix.PortEvent) error { if stat != nil { // If we get here, it means we've hit an event above that requires us to // continue watching the file or directory - return w.associateFile(path, stat, isWatched) + err := w.associateFile(path, stat, isWatched) + if errors.Is(err, fs.ErrNotExist) { + // Path may have been removed since the stat. + err = nil + } + return err } return nil } +// The directory was modified, so we must find unwatched entities and watch +// them. If something was removed from the directory, nothing will happen, as +// everything else should still be watched. func (w *fen) updateDirectory(path string) error { - // The directory was modified, so we must find unwatched entities and watch - // them. If something was removed from the directory, nothing will happen, - // as everything else should still be watched. files, err := os.ReadDir(path) if err != nil { + // Directory no longer exists: probably just deleted since we got the + // event. + if errors.Is(err, fs.ErrNotExist) { + return nil + } return err } @@ -401,10 +371,15 @@ func (w *fen) updateDirectory(path string) error { return err } err = w.associateFile(path, finfo, false) + if errors.Is(err, fs.ErrNotExist) { + // File may have disappeared between getting the dir listing and + // adding the port: that's okay to ignore. + continue + } if !w.sendError(err) { return nil } - if !w.sendEvent(path, Create) { + if !w.sendEvent(Event{Name: path, Op: Create}) { return nil } } @@ -430,7 +405,7 @@ func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error { // has fired but we haven't processed it yet. 
err := w.port.DissociatePath(path) if err != nil && !errors.Is(err, unix.ENOENT) { - return err + return fmt.Errorf("port.DissociatePath(%q): %w", path, err) } } @@ -446,14 +421,22 @@ func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error { if true { events |= unix.FILE_ATTRIB } - return w.port.AssociatePath(path, stat, events, stat.Mode()) + err := w.port.AssociatePath(path, stat, events, stat.Mode()) + if err != nil { + return fmt.Errorf("port.AssociatePath(%q): %w", path, err) + } + return nil } func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error { if !w.port.PathIsWatched(path) { return nil } - return w.port.DissociatePath(path) + err := w.port.DissociatePath(path) + if err != nil { + return fmt.Errorf("port.DissociatePath(%q): %w", path, err) + } + return nil } func (w *fen) WatchList() []string { diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go index 36c311694c..a36cb89d73 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -19,6 +19,7 @@ import ( ) type inotify struct { + *shared Events chan Event Errors chan error @@ -27,8 +28,6 @@ type inotify struct { fd int inotifyFile *os.File watches *watches - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - doneMu sync.Mutex doneResp chan struct{} // Channel to respond to Close // Store rename cookies in an array, with the index wrapping to 0. Almost @@ -52,7 +51,6 @@ type inotify struct { type ( watches struct { - mu sync.RWMutex wd map[uint32]*watch // wd → watch path map[string]uint32 // pathname → wd } @@ -75,34 +73,13 @@ func newWatches() *watches { } } -func (w *watches) len() int { - w.mu.RLock() - defer w.mu.RUnlock() - return len(w.wd) -} - -func (w *watches) add(ww *watch) { - w.mu.Lock() - defer w.mu.Unlock() - w.wd[ww.wd] = ww - w.path[ww.path] = ww.wd -} - -func (w *watches) remove(wd uint32) { - w.mu.Lock() - defer w.mu.Unlock() - watch := w.wd[wd] // Could have had Remove() called. See #616. 
- if watch == nil { - return - } - delete(w.path, watch.path) - delete(w.wd, wd) -} +func (w *watches) byPath(path string) *watch { return w.wd[w.path[path]] } +func (w *watches) byWd(wd uint32) *watch { return w.wd[wd] } +func (w *watches) len() int { return len(w.wd) } +func (w *watches) add(ww *watch) { w.wd[ww.wd] = ww; w.path[ww.path] = ww.wd } +func (w *watches) remove(watch *watch) { delete(w.path, watch.path); delete(w.wd, watch.wd) } func (w *watches) removePath(path string) ([]uint32, error) { - w.mu.Lock() - defer w.mu.Unlock() - path, recurse := recursivePath(path) wd, ok := w.path[path] if !ok { @@ -123,7 +100,7 @@ func (w *watches) removePath(path string) ([]uint32, error) { wds := make([]uint32, 0, 8) wds = append(wds, wd) for p, rwd := range w.path { - if filepath.HasPrefix(p, path) { + if strings.HasPrefix(p, path) { delete(w.path, p) delete(w.wd, rwd) wds = append(wds, rwd) @@ -132,22 +109,7 @@ func (w *watches) removePath(path string) ([]uint32, error) { return wds, nil } -func (w *watches) byPath(path string) *watch { - w.mu.RLock() - defer w.mu.RUnlock() - return w.wd[w.path[path]] -} - -func (w *watches) byWd(wd uint32) *watch { - w.mu.RLock() - defer w.mu.RUnlock() - return w.wd[wd] -} - func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error { - w.mu.Lock() - defer w.mu.Unlock() - var existing *watch wd, ok := w.path[path] if ok { @@ -170,11 +132,9 @@ func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error return nil } -func newBackend(ev chan Event, errs chan error) (backend, error) { - return newBufferedBackend(0, ev, errs) -} +var defaultBufferSize = 0 -func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { // Need to set nonblocking mode for SetDeadline to work, otherwise blocking // I/O operations won't terminate on close. fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) @@ -183,12 +143,12 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error } w := &inotify{ + shared: newShared(ev, errs), Events: ev, Errors: errs, fd: fd, inotifyFile: os.NewFile(uintptr(fd), ""), watches: newWatches(), - done: make(chan struct{}), doneResp: make(chan struct{}), } @@ -196,46 +156,10 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error return w, nil } -// Returns true if the event was sent, or false if watcher is closed. -func (w *inotify) sendEvent(e Event) bool { - select { - case <-w.done: - return false - case w.Events <- e: - return true - } -} - -// Returns true if the error was sent, or false if watcher is closed. -func (w *inotify) sendError(err error) bool { - if err == nil { - return true - } - select { - case <-w.done: - return false - case w.Errors <- err: - return true - } -} - -func (w *inotify) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - func (w *inotify) Close() error { - w.doneMu.Lock() - if w.isClosed() { - w.doneMu.Unlock() + if w.shared.close() { return nil } - close(w.done) - w.doneMu.Unlock() // Causes any blocking reads to return with an error, provided the file // still supports deadline operations. @@ -244,9 +168,7 @@ func (w *inotify) Close() error { return err } - // Wait for goroutine to close - <-w.doneResp - + <-w.doneResp // Wait for readEvents() to finish. 
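Reviewer note: the Close implementations in this and the surrounding hunks all shrink to the same shape because the per-backend done/doneMu bookkeeping moves into the new shared helper (added by this diff in shared.go, further below). A condensed, self-contained sketch of that pattern; the shared methods follow the vendored file, while demoBackend is invented for illustration:

package main

import (
	"fmt"
	"sync"
)

// shared mirrors vendor/github.com/fsnotify/fsnotify/shared.go: one done
// channel plus a mutex so close() is idempotent and race-free.
type shared struct {
	done chan struct{}
	mu   sync.Mutex
}

func newShared() *shared { return &shared{done: make(chan struct{})} }

func (w *shared) isClosed() bool {
	select {
	case <-w.done:
		return true
	default:
		return false
	}
}

// close marks the watcher as closed; returns true if it already was.
func (w *shared) close() bool {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.isClosed() {
		return true
	}
	close(w.done)
	return false
}

// demoBackend stands in for the fen/inotify/kqueue backends, which embed
// *shared instead of each re-implementing done-channel handling.
type demoBackend struct{ *shared }

func (b *demoBackend) Close() error {
	if b.shared.close() {
		return nil // Second and later calls are no-ops.
	}
	// ...release platform resources (ports, fds) here...
	return nil
}

func main() {
	b := &demoBackend{shared: newShared()}
	fmt.Println(b.Close(), b.Close()) // <nil> <nil>; only the first does work
}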
return nil } @@ -266,6 +188,43 @@ func (w *inotify) AddWith(path string, opts ...addOpt) error { return fmt.Errorf("%w: %s", xErrUnsupported, with.op) } + add := func(path string, with withOpts, recurse bool) error { + var flags uint32 + if with.noFollow { + flags |= unix.IN_DONT_FOLLOW + } + if with.op.Has(Create) { + flags |= unix.IN_CREATE + } + if with.op.Has(Write) { + flags |= unix.IN_MODIFY + } + if with.op.Has(Remove) { + flags |= unix.IN_DELETE | unix.IN_DELETE_SELF + } + if with.op.Has(Rename) { + flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF + } + if with.op.Has(Chmod) { + flags |= unix.IN_ATTRIB + } + if with.op.Has(xUnportableOpen) { + flags |= unix.IN_OPEN + } + if with.op.Has(xUnportableRead) { + flags |= unix.IN_ACCESS + } + if with.op.Has(xUnportableCloseWrite) { + flags |= unix.IN_CLOSE_WRITE + } + if with.op.Has(xUnportableCloseRead) { + flags |= unix.IN_CLOSE_NOWRITE + } + return w.register(path, flags, recurse) + } + + w.mu.Lock() + defer w.mu.Unlock() path, recurse := recursivePath(path) if recurse { return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error { @@ -289,46 +248,11 @@ func (w *inotify) AddWith(path string, opts ...addOpt) error { w.sendEvent(Event{Name: root, Op: Create}) } - return w.add(root, with, true) + return add(root, with, true) }) } - return w.add(path, with, false) -} - -func (w *inotify) add(path string, with withOpts, recurse bool) error { - var flags uint32 - if with.noFollow { - flags |= unix.IN_DONT_FOLLOW - } - if with.op.Has(Create) { - flags |= unix.IN_CREATE - } - if with.op.Has(Write) { - flags |= unix.IN_MODIFY - } - if with.op.Has(Remove) { - flags |= unix.IN_DELETE | unix.IN_DELETE_SELF - } - if with.op.Has(Rename) { - flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF - } - if with.op.Has(Chmod) { - flags |= unix.IN_ATTRIB - } - if with.op.Has(xUnportableOpen) { - flags |= unix.IN_OPEN - } - if with.op.Has(xUnportableRead) { - flags |= unix.IN_ACCESS - } - if with.op.Has(xUnportableCloseWrite) { - flags |= unix.IN_CLOSE_WRITE - } - if with.op.Has(xUnportableCloseRead) { - flags |= unix.IN_CLOSE_NOWRITE - } - return w.register(path, flags, recurse) + return add(path, with, false) } func (w *inotify) register(path string, flags uint32, recurse bool) error { @@ -342,6 +266,10 @@ func (w *inotify) register(path string, flags uint32, recurse bool) error { return nil, err } + if e, ok := w.watches.wd[uint32(wd)]; ok { + return e, nil + } + if existing == nil { return &watch{ wd: uint32(wd), @@ -365,6 +293,9 @@ func (w *inotify) Remove(name string) error { fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", time.Now().Format("15:04:05.000000000"), name) } + + w.mu.Lock() + defer w.mu.Unlock() return w.remove(filepath.Clean(name)) } @@ -399,13 +330,12 @@ func (w *inotify) WatchList() []string { return nil } + w.mu.Lock() + defer w.mu.Unlock() entries := make([]string, 0, w.watches.len()) - w.watches.mu.RLock() for pathname := range w.watches.path { entries = append(entries, pathname) } - w.watches.mu.RUnlock() - return entries } @@ -418,21 +348,17 @@ func (w *inotify) readEvents() { close(w.Events) }() - var ( - buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - errno error // Syscall errno - ) + var buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events for { - // See if we have been closed. 
if w.isClosed() { return } n, err := w.inotifyFile.Read(buf[:]) - switch { - case errors.Unwrap(err) == os.ErrClosed: - return - case err != nil: + if err != nil { + if errors.Is(err, os.ErrClosed) { + return + } if !w.sendError(err) { return } @@ -440,13 +366,9 @@ func (w *inotify) readEvents() { } if n < unix.SizeofInotifyEvent { - var err error + err := errors.New("notify: short read in readEvents()") // Read was too short. if n == 0 { err = io.EOF // If EOF is received. This should really never happen. - } else if n < 0 { - err = errno // If an error occurred while reading. - } else { - err = errors.New("notify: short read in readEvents()") // Read was too short. } if !w.sendError(err) { return @@ -454,132 +376,135 @@ func (w *inotify) readEvents() { continue } - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... + // We don't know how many events we just read into the buffer While the + // offset points to at least one whole event. var offset uint32 for offset <= uint32(n-unix.SizeofInotifyEvent) { - var ( - // Point "raw" to the event in the buffer - raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) - mask = uint32(raw.Mask) - nameLen = uint32(raw.Len) - // Move to the next event in the buffer - next = func() { offset += unix.SizeofInotifyEvent + nameLen } - ) - - if mask&unix.IN_Q_OVERFLOW != 0 { + // Point to the event in the buffer. + inEvent := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) + + if inEvent.Mask&unix.IN_Q_OVERFLOW != 0 { if !w.sendError(ErrEventOverflow) { return } } - /// If the event happened to the watched directory or the watched - /// file, the kernel doesn't append the filename to the event, but - /// we would like to always fill the the "Name" field with a valid - /// filename. We retrieve the path of the watch from the "paths" - /// map. - watch := w.watches.byWd(uint32(raw.Wd)) - /// Can be nil if Remove() was called in another goroutine for this - /// path inbetween reading the events from the kernel and reading - /// the internal state. Not much we can do about it, so just skip. - /// See #616. - if watch == nil { - next() - continue + ev, ok := w.handleEvent(inEvent, &buf, offset) + if !ok { + return } - - name := watch.path - if nameLen > 0 { - /// Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - /// The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + if !w.sendEvent(ev) { + return } - if debug { - internal.Debug(name, raw.Mask, raw.Cookie) - } + // Move to the next event in the buffer + offset += unix.SizeofInotifyEvent + inEvent.Len + } + } +} - if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0 - next() - continue - } +func (w *inotify) handleEvent(inEvent *unix.InotifyEvent, buf *[65536]byte, offset uint32) (Event, bool) { + w.mu.Lock() + defer w.mu.Unlock() - // inotify will automatically remove the watch on deletes; just need - // to clean our state here. - if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { - w.watches.remove(watch.wd) - } + /// If the event happened to the watched directory or the watched file, the + /// kernel doesn't append the filename to the event, but we would like to + /// always fill the the "Name" field with a valid filename. We retrieve the + /// path of the watch from the "paths" map. 
+ /// + /// Can be nil if Remove() was called in another goroutine for this path + /// inbetween reading the events from the kernel and reading the internal + /// state. Not much we can do about it, so just skip. See #616. + watch := w.watches.byWd(uint32(inEvent.Wd)) + if watch == nil { + return Event{}, true + } - // We can't really update the state when a watched path is moved; - // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove - // the watch. - if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { - if watch.recurse { - next() // Do nothing - continue - } + var ( + name = watch.path + nameLen = uint32(inEvent.Len) + ) + if nameLen > 0 { + /// Point "bytes" at the first byte of the filename + bb := *buf + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&bb[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + /// The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\x00") + } - err := w.remove(watch.path) - if err != nil && !errors.Is(err, ErrNonExistentWatch) { - if !w.sendError(err) { - return - } - } + if debug { + internal.Debug(name, inEvent.Mask, inEvent.Cookie) + } + + if inEvent.Mask&unix.IN_IGNORED != 0 || inEvent.Mask&unix.IN_UNMOUNT != 0 { + w.watches.remove(watch) + return Event{}, true + } + + // inotify will automatically remove the watch on deletes; just need + // to clean our state here. + if inEvent.Mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + w.watches.remove(watch) + } + + // We can't really update the state when a watched path is moved; only + // IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove the watch. + if inEvent.Mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if watch.recurse { // Do nothing + return Event{}, true + } + + err := w.remove(watch.path) + if err != nil && !errors.Is(err, ErrNonExistentWatch) { + if !w.sendError(err) { + return Event{}, false } + } + } - /// Skip if we're watching both this path and the parent; the parent - /// will already send a delete so no need to do it twice. - if mask&unix.IN_DELETE_SELF != 0 { - if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok { - next() - continue - } + /// Skip if we're watching both this path and the parent; the parent will + /// already send a delete so no need to do it twice. + if inEvent.Mask&unix.IN_DELETE_SELF != 0 { + _, ok := w.watches.path[filepath.Dir(watch.path)] + if ok { + return Event{}, true + } + } + + ev := w.newEvent(name, inEvent.Mask, inEvent.Cookie) + // Need to update watch path for recurse. + if watch.recurse { + isDir := inEvent.Mask&unix.IN_ISDIR == unix.IN_ISDIR + /// New directory created: set up watch on it. + if isDir && ev.Has(Create) { + err := w.register(ev.Name, watch.flags, true) + if !w.sendError(err) { + return Event{}, false } - ev := w.newEvent(name, mask, raw.Cookie) - // Need to update watch path for recurse. - if watch.recurse { - isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR - /// New directory created: set up watch on it. - if isDir && ev.Has(Create) { - err := w.register(ev.Name, watch.flags, true) - if !w.sendError(err) { - return + // This was a directory rename, so we need to update all the + // children. + // + // TODO: this is of course pretty slow; we should use a better data + // structure for storing all of this, e.g. store children in the + // watch. I have some code for this in my kqueue refactor we can use + // in the future. For now I'm okay with this as it's not publicly + // available. Correctness first, performance second. 
+ if ev.renamedFrom != "" { + for k, ww := range w.watches.wd { + if k == watch.wd || ww.path == ev.Name { + continue } - - // This was a directory rename, so we need to update all - // the children. - // - // TODO: this is of course pretty slow; we should use a - // better data structure for storing all of this, e.g. store - // children in the watch. I have some code for this in my - // kqueue refactor we can use in the future. For now I'm - // okay with this as it's not publicly available. - // Correctness first, performance second. - if ev.renamedFrom != "" { - w.watches.mu.Lock() - for k, ww := range w.watches.wd { - if k == watch.wd || ww.path == ev.Name { - continue - } - if strings.HasPrefix(ww.path, ev.renamedFrom) { - ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1) - w.watches.wd[k] = ww - } - } - w.watches.mu.Unlock() + if strings.HasPrefix(ww.path, ev.renamedFrom) { + ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1) + w.watches.wd[k] = ww } } } - - /// Send the events that are not ignored on the events channel - if !w.sendEvent(ev) { - return - } - next() } } + + return ev, true } func (w *inotify) isRecursive(path string) bool { @@ -650,8 +575,8 @@ func (w *inotify) xSupports(op Op) bool { } func (w *inotify) state() { - w.watches.mu.Lock() - defer w.watches.mu.Unlock() + w.mu.Lock() + defer w.mu.Unlock() for wd, ww := range w.watches.wd { fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path) } diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go index d8de5ab76f..340aeec061 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -16,14 +16,13 @@ import ( ) type kqueue struct { + *shared Events chan Event Errors chan error kq int // File descriptor (as returned by the kqueue() syscall). closepipe [2]int // Pipe used for closing kq. watches *watches - done chan struct{} - doneMu sync.Mutex } type ( @@ -132,14 +131,18 @@ func (w *watches) byPath(path string) (watch, bool) { return info, ok } -func (w *watches) updateDirFlags(path string, flags uint32) { +func (w *watches) updateDirFlags(path string, flags uint32) bool { w.mu.Lock() defer w.mu.Unlock() - fd := w.path[path] + fd, ok := w.path[path] + if !ok { // Already deleted: don't re-set it here. + return false + } info := w.wd[fd] info.dirFlags = flags w.wd[fd] = info + return true } func (w *watches) remove(fd int, path string) bool { @@ -179,22 +182,20 @@ func (w *watches) seenBefore(path string) bool { return ok } -func newBackend(ev chan Event, errs chan error) (backend, error) { - return newBufferedBackend(0, ev, errs) -} +var defaultBufferSize = 0 -func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { kq, closepipe, err := newKqueue() if err != nil { return nil, err } w := &kqueue{ + shared: newShared(ev, errs), Events: ev, Errors: errs, kq: kq, closepipe: closepipe, - done: make(chan struct{}), watches: newWatches(), } @@ -210,7 +211,7 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error // all. 
func newKqueue() (kq int, closepipe [2]int, err error) { kq, err = unix.Kqueue() - if kq == -1 { + if err != nil { return kq, closepipe, err } @@ -239,54 +240,17 @@ func newKqueue() (kq int, closepipe [2]int, err error) { return kq, closepipe, nil } -// Returns true if the event was sent, or false if watcher is closed. -func (w *kqueue) sendEvent(e Event) bool { - select { - case <-w.done: - return false - case w.Events <- e: - return true - } -} - -// Returns true if the error was sent, or false if watcher is closed. -func (w *kqueue) sendError(err error) bool { - if err == nil { - return true - } - select { - case <-w.done: - return false - case w.Errors <- err: - return true - } -} - -func (w *kqueue) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - func (w *kqueue) Close() error { - w.doneMu.Lock() - if w.isClosed() { - w.doneMu.Unlock() + if w.shared.close() { return nil } - close(w.done) - w.doneMu.Unlock() pathsToRemove := w.watches.listPaths(false) for _, name := range pathsToRemove { w.Remove(name) } - // Send "quit" message to the reader goroutine. - unix.Close(w.closepipe[1]) + unix.Close(w.closepipe[1]) // Send "quit" message to readEvents return nil } @@ -303,7 +267,7 @@ func (w *kqueue) AddWith(name string, opts ...addOpt) error { return fmt.Errorf("%w: %s", xErrUnsupported, with.op) } - _, err := w.addWatch(name, noteAllEvents) + _, err := w.addWatch(name, noteAllEvents, false) if err != nil { return err } @@ -366,7 +330,7 @@ const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | un // described in kevent(2). // // Returns the real path to the file which was added, with symlinks resolved. -func (w *kqueue) addWatch(name string, flags uint32) (string, error) { +func (w *kqueue) addWatch(name string, flags uint32, listDir bool) (string, error) { if w.isClosed() { return "", ErrClosed } @@ -385,15 +349,15 @@ func (w *kqueue) addWatch(name string, flags uint32) (string, error) { return "", nil } - // Follow symlinks. - if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + // Follow symlinks, but only for paths added with Add(), and not paths + // we're adding from internalWatch from a listdir. + if !listDir && fi.Mode()&os.ModeSymlink == os.ModeSymlink { link, err := os.Readlink(name) if err != nil { - // Return nil because Linux can add unresolvable symlinks to the - // watch list without problems, so maintain consistency with - // that. There will be no file events for broken symlinks. - // TODO: more specific check; returns os.PathError; ENOENT? 
- return "", nil + return "", err + } + if !filepath.IsAbs(link) { + link = filepath.Join(filepath.Dir(name), link) } _, alreadyWatching = w.watches.byPath(link) @@ -408,7 +372,7 @@ func (w *kqueue) addWatch(name string, flags uint32) (string, error) { name = link fi, err = os.Lstat(name) if err != nil { - return "", nil + return "", err } } @@ -422,7 +386,6 @@ func (w *kqueue) addWatch(name string, flags uint32) (string, error) { if errors.Is(err, unix.EINTR) { continue } - return "", err } @@ -444,10 +407,16 @@ func (w *kqueue) addWatch(name string, flags uint32) (string, error) { if info.isDir { watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && (!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE) - w.watches.updateDirFlags(name, flags) + if !w.watches.updateDirFlags(name, flags) { + return "", nil + } if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { + d := name + if info.linkName != "" { + d = info.linkName + } + if err := w.watchDirectoryFiles(d); err != nil { return "", err } } @@ -644,19 +613,22 @@ func (w *kqueue) dirChange(dir string) error { if errors.Is(err, os.ErrNotExist) { return nil } - return fmt.Errorf("fsnotify.dirChange: %w", err) + return fmt.Errorf("fsnotify.dirChange %q: %w", dir, err) } for _, f := range files { fi, err := f.Info() if err != nil { + if errors.Is(err, os.ErrNotExist) { + return nil + } return fmt.Errorf("fsnotify.dirChange: %w", err) } err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi) if err != nil { // Don't need to send an error if this file isn't readable. - if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { + if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) || errors.Is(err, os.ErrNotExist) { return nil } return fmt.Errorf("fsnotify.dirChange: %w", err) @@ -688,11 +660,11 @@ func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) { // mimic Linux providing delete events for subdirectories, but preserve // the flags used if currently watching subdirectory info, _ := w.watches.byPath(name) - return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME) + return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME, true) } - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) + // Watch file to mimic Linux inotify. + return w.addWatch(name, noteAllEvents, true) } // Register events with the queue. @@ -722,9 +694,9 @@ func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { } func (w *kqueue) xSupports(op Op) bool { - if runtime.GOOS == "freebsd" { - //return true // Supports everything. - } + //if runtime.GOOS == "freebsd" { + // return true // Supports everything. 
+ //} if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { return false diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go index 5eb5dbc66f..b8c0ad7226 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_other.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -9,12 +9,11 @@ type other struct { Errors chan error } +var defaultBufferSize = 0 + func newBackend(ev chan Event, errs chan error) (backend, error) { return nil, errors.New("fsnotify not supported on the current platform") } -func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { - return newBackend(ev, errs) -} func (w *other) Close() error { return nil } func (w *other) WatchList() []string { return nil } func (w *other) Add(name string) error { return nil } diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go index c54a630838..3433642d64 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_windows.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -28,18 +28,16 @@ type readDirChangesW struct { port windows.Handle // Handle to completion port input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error + done chan chan<- error mu sync.Mutex // Protects access to watches, closed watches watchMap // Map of watches (key: i-number) closed bool // Set to true when Close() is first called } -func newBackend(ev chan Event, errs chan error) (backend, error) { - return newBufferedBackend(50, ev, errs) -} +var defaultBufferSize = 50 -func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) if err != nil { return nil, os.NewSyscallError("CreateIoCompletionPort", err) @@ -50,7 +48,7 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error port: port, watches: make(watchMap), input: make(chan *input, 1), - quit: make(chan chan<- error, 1), + done: make(chan chan<- error, 1), } go w.readEvents() return w, nil @@ -70,8 +68,8 @@ func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool event := w.newEvent(name, uint32(mask)) event.renamedFrom = renamedFrom select { - case ch := <-w.quit: - w.quit <- ch + case ch := <-w.done: + w.done <- ch case w.Events <- event: } return true @@ -83,10 +81,10 @@ func (w *readDirChangesW) sendError(err error) bool { return true } select { + case <-w.done: + return false case w.Errors <- err: return true - case <-w.quit: - return false } } @@ -99,9 +97,9 @@ func (w *readDirChangesW) Close() error { w.closed = true w.mu.Unlock() - // Send "quit" message to the reader goroutine + // Send "done" message to the reader goroutine ch := make(chan error) - w.quit <- ch + w.done <- ch if err := w.wakeupReader(); err != nil { return err } @@ -495,7 +493,7 @@ func (w *readDirChangesW) readEvents() { watch := (*watch)(unsafe.Pointer(ov)) if watch == nil { select { - case ch := <-w.quit: + case ch := <-w.done: w.mu.Lock() var indexes []indexMap for _, index := range w.watches { diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 0760efe916..f64be4bf98 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ 
b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -244,12 +244,13 @@ var ( // ErrUnsupported is returned by AddWith() when WithOps() specified an // Unportable event that's not supported on this platform. + //lint:ignore ST1012 not relevant xErrUnsupported = errors.New("fsnotify: not supported with this backend") ) // NewWatcher creates a new Watcher. func NewWatcher() (*Watcher, error) { - ev, errs := make(chan Event), make(chan error) + ev, errs := make(chan Event, defaultBufferSize), make(chan error) b, err := newBackend(ev, errs) if err != nil { return nil, err @@ -266,8 +267,8 @@ func NewWatcher() (*Watcher, error) { // cases, and whenever possible you will be better off increasing the kernel // buffers instead of adding a large userspace buffer. func NewBufferedWatcher(sz uint) (*Watcher, error) { - ev, errs := make(chan Event), make(chan error) - b, err := newBufferedBackend(sz, ev, errs) + ev, errs := make(chan Event, sz), make(chan error) + b, err := newBackend(ev, errs) if err != nil { return nil, err } @@ -337,7 +338,8 @@ func (w *Watcher) Close() error { return w.b.Close() } // WatchList returns all paths explicitly added with [Watcher.Add] (and are not // yet removed). // -// Returns nil if [Watcher.Close] was called. +// The order is undefined, and may differ per call. Returns nil if +// [Watcher.Close] was called. func (w *Watcher) WatchList() []string { return w.b.WatchList() } // Supports reports if all the listed operations are supported by this platform. diff --git a/vendor/github.com/fsnotify/fsnotify/internal/darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go index b0eab10090..0b01bc182a 100644 --- a/vendor/github.com/fsnotify/fsnotify/internal/darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go @@ -9,14 +9,14 @@ import ( ) var ( - SyscallEACCES = syscall.EACCES - UnixEACCES = unix.EACCES + ErrSyscallEACCES = syscall.EACCES + ErrUnixEACCES = unix.EACCES ) var maxfiles uint64 -// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ var l syscall.Rlimit err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) if err == nil && l.Cur != l.Max { diff --git a/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go index 547df1df84..5ac8b50797 100644 --- a/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go +++ b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go @@ -9,8 +9,8 @@ import ( ) var ( - SyscallEACCES = syscall.EACCES - UnixEACCES = unix.EACCES + ErrSyscallEACCES = syscall.EACCES + ErrUnixEACCES = unix.EACCES ) var maxfiles uint64 diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix.go b/vendor/github.com/fsnotify/fsnotify/internal/unix.go index 30976ce973..b251fb8038 100644 --- a/vendor/github.com/fsnotify/fsnotify/internal/unix.go +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix.go @@ -1,4 +1,4 @@ -//go:build !windows && !darwin && !freebsd +//go:build !windows && !darwin && !freebsd && !plan9 package internal @@ -9,8 +9,8 @@ import ( ) var ( - SyscallEACCES = syscall.EACCES - UnixEACCES = unix.EACCES + ErrSyscallEACCES = syscall.EACCES + ErrUnixEACCES = unix.EACCES ) var maxfiles uint64 diff --git a/vendor/github.com/fsnotify/fsnotify/internal/windows.go b/vendor/github.com/fsnotify/fsnotify/internal/windows.go index a72c649549..896bc2e5a2 100644 --- a/vendor/github.com/fsnotify/fsnotify/internal/windows.go 
+++ b/vendor/github.com/fsnotify/fsnotify/internal/windows.go @@ -10,8 +10,8 @@ import ( // Just a dummy. var ( - SyscallEACCES = errors.New("dummy") - UnixEACCES = errors.New("dummy") + ErrSyscallEACCES = errors.New("dummy") + ErrUnixEACCES = errors.New("dummy") ) func SetRlimit() {} diff --git a/vendor/github.com/fsnotify/fsnotify/shared.go b/vendor/github.com/fsnotify/fsnotify/shared.go new file mode 100644 index 0000000000..3ee9b58f1d --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/shared.go @@ -0,0 +1,64 @@ +package fsnotify + +import "sync" + +type shared struct { + Events chan Event + Errors chan error + done chan struct{} + mu sync.Mutex +} + +func newShared(ev chan Event, errs chan error) *shared { + return &shared{ + Events: ev, + Errors: errs, + done: make(chan struct{}), + } +} + +// Returns true if the event was sent, or false if watcher is closed. +func (w *shared) sendEvent(e Event) bool { + if e.Op == 0 { + return true + } + select { + case <-w.done: + return false + case w.Events <- e: + return true + } +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *shared) sendError(err error) bool { + if err == nil { + return true + } + select { + case <-w.done: + return false + case w.Errors <- err: + return true + } +} + +func (w *shared) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Mark as closed; returns true if it was already closed. +func (w *shared) close() bool { + w.mu.Lock() + defer w.mu.Unlock() + if w.isClosed() { + return true + } + close(w.done) + return false +} diff --git a/vendor/github.com/fsnotify/fsnotify/staticcheck.conf b/vendor/github.com/fsnotify/fsnotify/staticcheck.conf new file mode 100644 index 0000000000..8fa7351f0c --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/staticcheck.conf @@ -0,0 +1,3 @@ +checks = ['all', + '-U1000', # Don't complain about unused functions. 
+] diff --git a/vendor/github.com/go-openapi/analysis/.codecov.yml b/vendor/github.com/go-openapi/analysis/.codecov.yml new file mode 100644 index 0000000000..841c4281e2 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/.codecov.yml @@ -0,0 +1,5 @@ +coverage: + status: + patch: + default: + target: 80% diff --git a/vendor/github.com/go-openapi/analysis/.gitattributes b/vendor/github.com/go-openapi/analysis/.gitattributes new file mode 100644 index 0000000000..d020be8ea4 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/.gitattributes @@ -0,0 +1,2 @@ +*.go text eol=lf + diff --git a/vendor/github.com/go-openapi/analysis/.gitignore b/vendor/github.com/go-openapi/analysis/.gitignore new file mode 100644 index 0000000000..87c3bd3e66 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/.gitignore @@ -0,0 +1,5 @@ +secrets.yml +coverage.out +coverage.txt +*.cov +.idea diff --git a/vendor/github.com/go-openapi/analysis/.golangci.yml b/vendor/github.com/go-openapi/analysis/.golangci.yml new file mode 100644 index 0000000000..22f8d21cca --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/.golangci.yml @@ -0,0 +1,61 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 45 + maligned: + suggest-new: true + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + +linters: + enable-all: true + disable: + - maligned + - unparam + - lll + - gochecknoinits + - gochecknoglobals + - funlen + - godox + - gocognit + - whitespace + - wsl + - wrapcheck + - testpackage + - nlreturn + - gomnd + - exhaustivestruct + - goerr113 + - errorlint + - nestif + - godot + - gofumpt + - paralleltest + - tparallel + - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..9322b065e3 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/analysis/LICENSE b/vendor/github.com/go-openapi/analysis/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/analysis/README.md b/vendor/github.com/go-openapi/analysis/README.md new file mode 100644 index 0000000000..e005d4b37b --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/README.md @@ -0,0 +1,27 @@ +# OpenAPI analysis [![Build Status](https://github.com/go-openapi/analysis/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/analysis/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/analysis/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/analysis) + +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/analysis.svg)](https://pkg.go.dev/github.com/go-openapi/analysis) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/analysis)](https://goreportcard.com/report/github.com/go-openapi/analysis) + + +A foundational library to analyze an OAI specification document for easier reasoning about the content. + +## What's inside? + +* An analyzer providing methods to walk the functional content of a specification +* A spec flattener producing a self-contained document bundle, while preserving `$ref`s +* A spec merger ("mixin") to merge several spec documents into a primary spec +* A spec "fixer" ensuring that response descriptions are non empty + +[Documentation](https://pkg.go.dev/github.com/go-openapi/analysis) + +## FAQ + +* Does this library support OpenAPI 3? + +> No. +> This package currently only supports OpenAPI 2.0 (aka Swagger 2.0). +> There is no plan to make it evolve toward supporting OpenAPI 3.x. +> This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story. diff --git a/vendor/github.com/go-openapi/analysis/analyzer.go b/vendor/github.com/go-openapi/analysis/analyzer.go new file mode 100644 index 0000000000..c17aee1b61 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/analyzer.go @@ -0,0 +1,1064 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
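Reviewer note: the go-openapi/analysis README above describes what the analyzer indexes. A minimal sketch of driving it, assuming the usual pairing with github.com/go-openapi/loads (also pulled in by this PR); the petstore filename is hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/loads"
)

func main() {
	doc, err := loads.Spec("petstore.json") // hypothetical Swagger 2.0 document
	if err != nil {
		log.Fatal(err)
	}

	// New walks the spec once and builds the indices (refs, patterns, enums,
	// operations) that the Spec methods read from.
	an := analysis.New(doc.Spec())

	for path := range an.AllPaths() {
		fmt.Println("path:", path)
	}
}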
+ +package analysis + +import ( + "fmt" + slashpath "path" + "strconv" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/spec" + "github.com/go-openapi/swag" +) + +type referenceAnalysis struct { + schemas map[string]spec.Ref + responses map[string]spec.Ref + parameters map[string]spec.Ref + items map[string]spec.Ref + headerItems map[string]spec.Ref + parameterItems map[string]spec.Ref + allRefs map[string]spec.Ref + pathItems map[string]spec.Ref +} + +func (r *referenceAnalysis) addRef(key string, ref spec.Ref) { + r.allRefs["#"+key] = ref +} + +func (r *referenceAnalysis) addItemsRef(key string, items *spec.Items, location string) { + r.items["#"+key] = items.Ref + r.addRef(key, items.Ref) + if location == "header" { + // NOTE: in swagger 2.0, headers and parameters (but not body param schemas) are simple schemas + // and $ref are not supported here. However it is possible to analyze this. + r.headerItems["#"+key] = items.Ref + } else { + r.parameterItems["#"+key] = items.Ref + } +} + +func (r *referenceAnalysis) addSchemaRef(key string, ref SchemaRef) { + r.schemas["#"+key] = ref.Schema.Ref + r.addRef(key, ref.Schema.Ref) +} + +func (r *referenceAnalysis) addResponseRef(key string, resp *spec.Response) { + r.responses["#"+key] = resp.Ref + r.addRef(key, resp.Ref) +} + +func (r *referenceAnalysis) addParamRef(key string, param *spec.Parameter) { + r.parameters["#"+key] = param.Ref + r.addRef(key, param.Ref) +} + +func (r *referenceAnalysis) addPathItemRef(key string, pathItem *spec.PathItem) { + r.pathItems["#"+key] = pathItem.Ref + r.addRef(key, pathItem.Ref) +} + +type patternAnalysis struct { + parameters map[string]string + headers map[string]string + items map[string]string + schemas map[string]string + allPatterns map[string]string +} + +func (p *patternAnalysis) addPattern(key, pattern string) { + p.allPatterns["#"+key] = pattern +} + +func (p *patternAnalysis) addParameterPattern(key, pattern string) { + p.parameters["#"+key] = pattern + p.addPattern(key, pattern) +} + +func (p *patternAnalysis) addHeaderPattern(key, pattern string) { + p.headers["#"+key] = pattern + p.addPattern(key, pattern) +} + +func (p *patternAnalysis) addItemsPattern(key, pattern string) { + p.items["#"+key] = pattern + p.addPattern(key, pattern) +} + +func (p *patternAnalysis) addSchemaPattern(key, pattern string) { + p.schemas["#"+key] = pattern + p.addPattern(key, pattern) +} + +type enumAnalysis struct { + parameters map[string][]interface{} + headers map[string][]interface{} + items map[string][]interface{} + schemas map[string][]interface{} + allEnums map[string][]interface{} +} + +func (p *enumAnalysis) addEnum(key string, enum []interface{}) { + p.allEnums["#"+key] = enum +} + +func (p *enumAnalysis) addParameterEnum(key string, enum []interface{}) { + p.parameters["#"+key] = enum + p.addEnum(key, enum) +} + +func (p *enumAnalysis) addHeaderEnum(key string, enum []interface{}) { + p.headers["#"+key] = enum + p.addEnum(key, enum) +} + +func (p *enumAnalysis) addItemsEnum(key string, enum []interface{}) { + p.items["#"+key] = enum + p.addEnum(key, enum) +} + +func (p *enumAnalysis) addSchemaEnum(key string, enum []interface{}) { + p.schemas["#"+key] = enum + p.addEnum(key, enum) +} + +// New takes a swagger spec object and returns an analyzed spec document. +// The analyzed document contains a number of indices that make it easier to +// reason about semantics of a swagger specification for use in code generation +// or validation etc. 
+func New(doc *spec.Swagger) *Spec { + a := &Spec{ + spec: doc, + references: referenceAnalysis{}, + patterns: patternAnalysis{}, + enums: enumAnalysis{}, + } + a.reset() + a.initialize() + + return a +} + +// Spec is an analyzed specification object. It takes a swagger spec object and turns it into a registry +// with a bunch of utility methods to act on the information in the spec. +type Spec struct { + spec *spec.Swagger + consumes map[string]struct{} + produces map[string]struct{} + authSchemes map[string]struct{} + operations map[string]map[string]*spec.Operation + references referenceAnalysis + patterns patternAnalysis + enums enumAnalysis + allSchemas map[string]SchemaRef + allOfs map[string]SchemaRef +} + +func (s *Spec) reset() { + s.consumes = make(map[string]struct{}, 150) + s.produces = make(map[string]struct{}, 150) + s.authSchemes = make(map[string]struct{}, 150) + s.operations = make(map[string]map[string]*spec.Operation, 150) + s.allSchemas = make(map[string]SchemaRef, 150) + s.allOfs = make(map[string]SchemaRef, 150) + s.references.schemas = make(map[string]spec.Ref, 150) + s.references.pathItems = make(map[string]spec.Ref, 150) + s.references.responses = make(map[string]spec.Ref, 150) + s.references.parameters = make(map[string]spec.Ref, 150) + s.references.items = make(map[string]spec.Ref, 150) + s.references.headerItems = make(map[string]spec.Ref, 150) + s.references.parameterItems = make(map[string]spec.Ref, 150) + s.references.allRefs = make(map[string]spec.Ref, 150) + s.patterns.parameters = make(map[string]string, 150) + s.patterns.headers = make(map[string]string, 150) + s.patterns.items = make(map[string]string, 150) + s.patterns.schemas = make(map[string]string, 150) + s.patterns.allPatterns = make(map[string]string, 150) + s.enums.parameters = make(map[string][]interface{}, 150) + s.enums.headers = make(map[string][]interface{}, 150) + s.enums.items = make(map[string][]interface{}, 150) + s.enums.schemas = make(map[string][]interface{}, 150) + s.enums.allEnums = make(map[string][]interface{}, 150) +} + +func (s *Spec) reload() { + s.reset() + s.initialize() +} + +func (s *Spec) initialize() { + for _, c := range s.spec.Consumes { + s.consumes[c] = struct{}{} + } + for _, c := range s.spec.Produces { + s.produces[c] = struct{}{} + } + for _, ss := range s.spec.Security { + for k := range ss { + s.authSchemes[k] = struct{}{} + } + } + for path, pathItem := range s.AllPaths() { + s.analyzeOperations(path, &pathItem) //#nosec + } + + for name, parameter := range s.spec.Parameters { + refPref := slashpath.Join("/parameters", jsonpointer.Escape(name)) + if parameter.Items != nil { + s.analyzeItems("items", parameter.Items, refPref, "parameter") + } + if parameter.In == "body" && parameter.Schema != nil { + s.analyzeSchema("schema", parameter.Schema, refPref) + } + if parameter.Pattern != "" { + s.patterns.addParameterPattern(refPref, parameter.Pattern) + } + if len(parameter.Enum) > 0 { + s.enums.addParameterEnum(refPref, parameter.Enum) + } + } + + for name, response := range s.spec.Responses { + refPref := slashpath.Join("/responses", jsonpointer.Escape(name)) + for k, v := range response.Headers { + hRefPref := slashpath.Join(refPref, "headers", k) + if v.Items != nil { + s.analyzeItems("items", v.Items, hRefPref, "header") + } + if v.Pattern != "" { + s.patterns.addHeaderPattern(hRefPref, v.Pattern) + } + if len(v.Enum) > 0 { + s.enums.addHeaderEnum(hRefPref, v.Enum) + } + } + if response.Schema != nil { + s.analyzeSchema("schema", response.Schema, refPref) + } + 
} + + for name := range s.spec.Definitions { + schema := s.spec.Definitions[name] + s.analyzeSchema(name, &schema, "/definitions") + } + // TODO: after analyzing all things and flattening schemas etc + // resolve all the collected references to their final representations + // best put in a separate method because this could get expensive +} + +func (s *Spec) analyzeOperations(path string, pi *spec.PathItem) { + // TODO: resolve refs here? + // Currently, operations declared via pathItem $ref are known only after expansion + op := pi + if pi.Ref.String() != "" { + key := slashpath.Join("/paths", jsonpointer.Escape(path)) + s.references.addPathItemRef(key, pi) + } + s.analyzeOperation("GET", path, op.Get) + s.analyzeOperation("PUT", path, op.Put) + s.analyzeOperation("POST", path, op.Post) + s.analyzeOperation("PATCH", path, op.Patch) + s.analyzeOperation("DELETE", path, op.Delete) + s.analyzeOperation("HEAD", path, op.Head) + s.analyzeOperation("OPTIONS", path, op.Options) + for i, param := range op.Parameters { + refPref := slashpath.Join("/paths", jsonpointer.Escape(path), "parameters", strconv.Itoa(i)) + if param.Ref.String() != "" { + s.references.addParamRef(refPref, &param) //#nosec + } + if param.Pattern != "" { + s.patterns.addParameterPattern(refPref, param.Pattern) + } + if len(param.Enum) > 0 { + s.enums.addParameterEnum(refPref, param.Enum) + } + if param.Items != nil { + s.analyzeItems("items", param.Items, refPref, "parameter") + } + if param.Schema != nil { + s.analyzeSchema("schema", param.Schema, refPref) + } + } +} + +func (s *Spec) analyzeItems(name string, items *spec.Items, prefix, location string) { + if items == nil { + return + } + refPref := slashpath.Join(prefix, name) + s.analyzeItems(name, items.Items, refPref, location) + if items.Ref.String() != "" { + s.references.addItemsRef(refPref, items, location) + } + if items.Pattern != "" { + s.patterns.addItemsPattern(refPref, items.Pattern) + } + if len(items.Enum) > 0 { + s.enums.addItemsEnum(refPref, items.Enum) + } +} + +func (s *Spec) analyzeParameter(prefix string, i int, param spec.Parameter) { + refPref := slashpath.Join(prefix, "parameters", strconv.Itoa(i)) + if param.Ref.String() != "" { + s.references.addParamRef(refPref, &param) //#nosec + } + + if param.Pattern != "" { + s.patterns.addParameterPattern(refPref, param.Pattern) + } + + if len(param.Enum) > 0 { + s.enums.addParameterEnum(refPref, param.Enum) + } + + s.analyzeItems("items", param.Items, refPref, "parameter") + if param.In == "body" && param.Schema != nil { + s.analyzeSchema("schema", param.Schema, refPref) + } +} + +func (s *Spec) analyzeOperation(method, path string, op *spec.Operation) { + if op == nil { + return + } + + for _, c := range op.Consumes { + s.consumes[c] = struct{}{} + } + + for _, c := range op.Produces { + s.produces[c] = struct{}{} + } + + for _, ss := range op.Security { + for k := range ss { + s.authSchemes[k] = struct{}{} + } + } + + if _, ok := s.operations[method]; !ok { + s.operations[method] = make(map[string]*spec.Operation) + } + + s.operations[method][path] = op + prefix := slashpath.Join("/paths", jsonpointer.Escape(path), strings.ToLower(method)) + for i, param := range op.Parameters { + s.analyzeParameter(prefix, i, param) + } + + if op.Responses == nil { + return + } + + if op.Responses.Default != nil { + s.analyzeDefaultResponse(prefix, op.Responses.Default) + } + + for k, res := range op.Responses.StatusCodeResponses { + s.analyzeResponse(prefix, k, res) + } +} + +func (s *Spec) analyzeDefaultResponse(prefix
string, res *spec.Response) { + refPref := slashpath.Join(prefix, "responses", "default") + if res.Ref.String() != "" { + s.references.addResponseRef(refPref, res) + } + + for k, v := range res.Headers { + hRefPref := slashpath.Join(refPref, "headers", k) + s.analyzeItems("items", v.Items, hRefPref, "header") + if v.Pattern != "" { + s.patterns.addHeaderPattern(hRefPref, v.Pattern) + } + } + + if res.Schema != nil { + s.analyzeSchema("schema", res.Schema, refPref) + } +} + +func (s *Spec) analyzeResponse(prefix string, k int, res spec.Response) { + refPref := slashpath.Join(prefix, "responses", strconv.Itoa(k)) + if res.Ref.String() != "" { + s.references.addResponseRef(refPref, &res) //#nosec + } + + for k, v := range res.Headers { + hRefPref := slashpath.Join(refPref, "headers", k) + s.analyzeItems("items", v.Items, hRefPref, "header") + if v.Pattern != "" { + s.patterns.addHeaderPattern(hRefPref, v.Pattern) + } + + if len(v.Enum) > 0 { + s.enums.addHeaderEnum(hRefPref, v.Enum) + } + } + + if res.Schema != nil { + s.analyzeSchema("schema", res.Schema, refPref) + } +} + +func (s *Spec) analyzeSchema(name string, schema *spec.Schema, prefix string) { + refURI := slashpath.Join(prefix, jsonpointer.Escape(name)) + schRef := SchemaRef{ + Name: name, + Schema: schema, + Ref: spec.MustCreateRef("#" + refURI), + TopLevel: prefix == "/definitions", + } + + s.allSchemas["#"+refURI] = schRef + + if schema.Ref.String() != "" { + s.references.addSchemaRef(refURI, schRef) + } + + if schema.Pattern != "" { + s.patterns.addSchemaPattern(refURI, schema.Pattern) + } + + if len(schema.Enum) > 0 { + s.enums.addSchemaEnum(refURI, schema.Enum) + } + + for k, v := range schema.Definitions { + v := v + s.analyzeSchema(k, &v, slashpath.Join(refURI, "definitions")) + } + + for k, v := range schema.Properties { + v := v + s.analyzeSchema(k, &v, slashpath.Join(refURI, "properties")) + } + + for k, v := range schema.PatternProperties { + v := v + // NOTE: swagger 2.0 does not support PatternProperties. + // However it is possible to analyze this in a schema + s.analyzeSchema(k, &v, slashpath.Join(refURI, "patternProperties")) + } + + for i := range schema.AllOf { + v := &schema.AllOf[i] + s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "allOf")) + } + + if len(schema.AllOf) > 0 { + s.allOfs["#"+refURI] = schRef + } + + for i := range schema.AnyOf { + v := &schema.AnyOf[i] + // NOTE: swagger 2.0 does not support anyOf constructs. + // However it is possible to analyze this in a schema + s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "anyOf")) + } + + for i := range schema.OneOf { + v := &schema.OneOf[i] + // NOTE: swagger 2.0 does not support oneOf constructs. + // However it is possible to analyze this in a schema + s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "oneOf")) + } + + if schema.Not != nil { + // NOTE: swagger 2.0 does not support "not" constructs. + // However it is possible to analyze this in a schema + s.analyzeSchema("not", schema.Not, refURI) + } + + if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { + s.analyzeSchema("additionalProperties", schema.AdditionalProperties.Schema, refURI) + } + + if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { + // NOTE: swagger 2.0 does not support AdditionalItems. 
+ // However it is possible to analyze this in a schema + s.analyzeSchema("additionalItems", schema.AdditionalItems.Schema, refURI) + } + + if schema.Items != nil { + if schema.Items.Schema != nil { + s.analyzeSchema("items", schema.Items.Schema, refURI) + } + + for i := range schema.Items.Schemas { + sch := &schema.Items.Schemas[i] + s.analyzeSchema(strconv.Itoa(i), sch, slashpath.Join(refURI, "items")) + } + } +} + +// SecurityRequirement is a representation of a security requirement for an operation +type SecurityRequirement struct { + Name string + Scopes []string +} + +// SecurityRequirementsFor gets the security requirements for the operation +func (s *Spec) SecurityRequirementsFor(operation *spec.Operation) [][]SecurityRequirement { + if s.spec.Security == nil && operation.Security == nil { + return nil + } + + schemes := s.spec.Security + if operation.Security != nil { + schemes = operation.Security + } + + result := [][]SecurityRequirement{} + for _, scheme := range schemes { + if len(scheme) == 0 { + // append a zero object for anonymous + result = append(result, []SecurityRequirement{{}}) + + continue + } + + var reqs []SecurityRequirement + for k, v := range scheme { + if v == nil { + v = []string{} + } + reqs = append(reqs, SecurityRequirement{Name: k, Scopes: v}) + } + + result = append(result, reqs) + } + + return result +} + +// SecurityDefinitionsForRequirements gets the matching security definitions for a set of requirements +func (s *Spec) SecurityDefinitionsForRequirements(requirements []SecurityRequirement) map[string]spec.SecurityScheme { + result := make(map[string]spec.SecurityScheme) + + for _, v := range requirements { + if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { + if definition != nil { + result[v.Name] = *definition + } + } + } + + return result +} + +// SecurityDefinitionsFor gets the matching security definitions for a set of requirements +func (s *Spec) SecurityDefinitionsFor(operation *spec.Operation) map[string]spec.SecurityScheme { + requirements := s.SecurityRequirementsFor(operation) + if len(requirements) == 0 { + return nil + } + + result := make(map[string]spec.SecurityScheme) + for _, reqs := range requirements { + for _, v := range reqs { + if v.Name == "" { + // optional requirement + continue + } + + if _, ok := result[v.Name]; ok { + // duplicate requirement + continue + } + + if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { + if definition != nil { + result[v.Name] = *definition + } + } + } + } + + return result +} + +// ConsumesFor gets the mediatypes for the operation +func (s *Spec) ConsumesFor(operation *spec.Operation) []string { + if len(operation.Consumes) == 0 { + cons := make(map[string]struct{}, len(s.spec.Consumes)) + for _, k := range s.spec.Consumes { + cons[k] = struct{}{} + } + + return s.structMapKeys(cons) + } + + cons := make(map[string]struct{}, len(operation.Consumes)) + for _, c := range operation.Consumes { + cons[c] = struct{}{} + } + + return s.structMapKeys(cons) +} + +// ProducesFor gets the mediatypes for the operation +func (s *Spec) ProducesFor(operation *spec.Operation) []string { + if len(operation.Produces) == 0 { + prod := make(map[string]struct{}, len(s.spec.Produces)) + for _, k := range s.spec.Produces { + prod[k] = struct{}{} + } + + return s.structMapKeys(prod) + } + + prod := make(map[string]struct{}, len(operation.Produces)) + for _, c := range operation.Produces { + prod[c] = struct{}{} + } + + return s.structMapKeys(prod) +} + +func mapKeyFromParam(param *spec.Parameter) 
string { + return fmt.Sprintf("%s#%s", param.In, fieldNameFromParam(param)) +} + +func fieldNameFromParam(param *spec.Parameter) string { + // TODO: this should be x-go-name + if nm, ok := param.Extensions.GetString("go-name"); ok { + return nm + } + + return swag.ToGoName(param.Name) +} + +// ErrorOnParamFunc is a callback function to be invoked +// whenever an error is encountered while resolving references +// on parameters. +// +// This function takes as input the spec.Parameter which triggered the +// error and the error itself. +// +// If the callback function returns false, the calling function should bail. +// +// If it returns true, the calling function should continue evaluating parameters. +// A nil ErrorOnParamFunc must be evaluated as equivalent to panic(). +type ErrorOnParamFunc func(spec.Parameter, error) bool + +func (s *Spec) paramsAsMap(parameters []spec.Parameter, res map[string]spec.Parameter, callmeOnError ErrorOnParamFunc) { + for _, param := range parameters { + pr := param + if pr.Ref.String() == "" { + res[mapKeyFromParam(&pr)] = pr + + continue + } + + // resolve $ref + if callmeOnError == nil { + callmeOnError = func(_ spec.Parameter, err error) bool { + panic(err) + } + } + + obj, _, err := pr.Ref.GetPointer().Get(s.spec) + if err != nil { + if callmeOnError(param, fmt.Errorf("invalid reference: %q", pr.Ref.String())) { + continue + } + + break + } + + objAsParam, ok := obj.(spec.Parameter) + if !ok { + if callmeOnError(param, fmt.Errorf("resolved reference is not a parameter: %q", pr.Ref.String())) { + continue + } + + break + } + + pr = objAsParam + res[mapKeyFromParam(&pr)] = pr + } +} + +// ParametersFor the specified operation id. +// +// Assumes parameters properly resolve references if any and that +// such references actually resolve to a parameter object. +// Otherwise, panics. +func (s *Spec) ParametersFor(operationID string) []spec.Parameter { + return s.SafeParametersFor(operationID, nil) +} + +// SafeParametersFor the specified operation id. +// +// Does not assume parameters properly resolve references or that +// such references actually resolve to a parameter object. +// +// Upon error, invoke an ErrorOnParamFunc callback with the erroneous +// parameters. If the callback is set to nil, panics upon errors. +func (s *Spec) SafeParametersFor(operationID string, callmeOnError ErrorOnParamFunc) []spec.Parameter { + gatherParams := func(pi *spec.PathItem, op *spec.Operation) []spec.Parameter { + bag := make(map[string]spec.Parameter) + s.paramsAsMap(pi.Parameters, bag, callmeOnError) + s.paramsAsMap(op.Parameters, bag, callmeOnError) + + var res []spec.Parameter + for _, v := range bag { + res = append(res, v) + } + + return res + } + + for _, pi := range s.spec.Paths.Paths { + if pi.Get != nil && pi.Get.ID == operationID { + return gatherParams(&pi, pi.Get) //#nosec + } + if pi.Head != nil && pi.Head.ID == operationID { + return gatherParams(&pi, pi.Head) //#nosec + } + if pi.Options != nil && pi.Options.ID == operationID { + return gatherParams(&pi, pi.Options) //#nosec + } + if pi.Post != nil && pi.Post.ID == operationID { + return gatherParams(&pi, pi.Post) //#nosec + } + if pi.Patch != nil && pi.Patch.ID == operationID { + return gatherParams(&pi, pi.Patch) //#nosec + } + if pi.Put != nil && pi.Put.ID == operationID { + return gatherParams(&pi, pi.Put) //#nosec + } + if pi.Delete != nil && pi.Delete.ID == operationID { + return gatherParams(&pi, pi.Delete) //#nosec + } + } + + return nil +} + +// ParamsFor the specified method and path.
Aggregates them with the defaults etc, so it's all the params that +// apply for the method and path. +// +// Assumes parameters properly resolve references if any and that +// such references actually resolve to a parameter object. +// Otherwise, panics. +func (s *Spec) ParamsFor(method, path string) map[string]spec.Parameter { + return s.SafeParamsFor(method, path, nil) +} + +// SafeParamsFor the specified method and path. Aggregates them with the defaults etc, so it's all the params that +// apply for the method and path. +// +// Does not assume parameters properly resolve references or that +// such references actually resolve to a parameter object. +// +// Upon error, invoke an ErrorOnParamFunc callback with the erroneous +// parameters. If the callback is set to nil, panics upon errors. +func (s *Spec) SafeParamsFor(method, path string, callmeOnError ErrorOnParamFunc) map[string]spec.Parameter { + res := make(map[string]spec.Parameter) + if pi, ok := s.spec.Paths.Paths[path]; ok { + s.paramsAsMap(pi.Parameters, res, callmeOnError) + s.paramsAsMap(s.operations[strings.ToUpper(method)][path].Parameters, res, callmeOnError) + } + + return res +} + +// OperationForName gets the operation for the given id +func (s *Spec) OperationForName(operationID string) (string, string, *spec.Operation, bool) { + for method, pathItem := range s.operations { + for path, op := range pathItem { + if operationID == op.ID { + return method, path, op, true + } + } + } + + return "", "", nil, false +} + +// OperationFor the given method and path +func (s *Spec) OperationFor(method, path string) (*spec.Operation, bool) { + if mp, ok := s.operations[strings.ToUpper(method)]; ok { + op, fn := mp[path] + + return op, fn + } + + return nil, false +} + +// Operations gathers all the operations specified in the spec document +func (s *Spec) Operations() map[string]map[string]*spec.Operation { + return s.operations +} + +func (s *Spec) structMapKeys(mp map[string]struct{}) []string { + if len(mp) == 0 { + return nil + } + + result := make([]string, 0, len(mp)) + for k := range mp { + result = append(result, k) + } + + return result +} + +// AllPaths returns all the paths in the swagger spec +func (s *Spec) AllPaths() map[string]spec.PathItem { + if s.spec == nil || s.spec.Paths == nil { + return nil + } + + return s.spec.Paths.Paths +} + +// OperationIDs gets all the operation ids based on method and path +func (s *Spec) OperationIDs() []string { + if len(s.operations) == 0 { + return nil + } + + result := make([]string, 0, len(s.operations)) + for method, v := range s.operations { + for p, o := range v { + if o.ID != "" { + result = append(result, o.ID) + } else { + result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p)) + } + } + } + + return result +} + +// OperationMethodPaths gets all the operation ids based on method and path +func (s *Spec) OperationMethodPaths() []string { + if len(s.operations) == 0 { + return nil + } + + result := make([]string, 0, len(s.operations)) + for method, v := range s.operations { + for p := range v { + result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p)) + } + } + + return result +} + +// RequiredConsumes gets all the distinct consumes that are specified in the specification document +func (s *Spec) RequiredConsumes() []string { + return s.structMapKeys(s.consumes) +} + +// RequiredProduces gets all the distinct produces that are specified in the specification document +func (s *Spec) RequiredProduces() []string { + return
s.structMapKeys(s.produces) +} + +// RequiredSecuritySchemes gets all the distinct security schemes that are specified in the swagger spec +func (s *Spec) RequiredSecuritySchemes() []string { + return s.structMapKeys(s.authSchemes) +} + +// SchemaRef is a reference to a schema +type SchemaRef struct { + Name string + Ref spec.Ref + Schema *spec.Schema + TopLevel bool +} + +// SchemasWithAllOf returns schema references to all schemas that are defined +// with an allOf key +func (s *Spec) SchemasWithAllOf() (result []SchemaRef) { + for _, v := range s.allOfs { + result = append(result, v) + } + + return +} + +// AllDefinitions returns schema references for all the definitions that were discovered +func (s *Spec) AllDefinitions() (result []SchemaRef) { + for _, v := range s.allSchemas { + result = append(result, v) + } + + return +} + +// AllDefinitionReferences returns json refs for all the discovered schemas +func (s *Spec) AllDefinitionReferences() (result []string) { + for _, v := range s.references.schemas { + result = append(result, v.String()) + } + + return +} + +// AllParameterReferences returns json refs for all the discovered parameters +func (s *Spec) AllParameterReferences() (result []string) { + for _, v := range s.references.parameters { + result = append(result, v.String()) + } + + return +} + +// AllResponseReferences returns json refs for all the discovered responses +func (s *Spec) AllResponseReferences() (result []string) { + for _, v := range s.references.responses { + result = append(result, v.String()) + } + + return +} + +// AllPathItemReferences returns the references for all the path items +func (s *Spec) AllPathItemReferences() (result []string) { + for _, v := range s.references.pathItems { + result = append(result, v.String()) + } + + return +} + +// AllItemsReferences returns the references for all the items in simple schemas (parameters or headers). +// +// NOTE: since Swagger 2.0 forbids $ref in simple params, this should always yield an empty slice for a valid +// Swagger 2.0 spec.
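+// +// As a sketch only, a caller could use this to flag invalid specs (an is an +// analyzed spec obtained from New): +// +//	if refs := an.AllItemsReferences(); len(refs) > 0 { +//		log.Printf("$ref in simple items is invalid Swagger 2.0: %v", refs) +//	}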
+func (s *Spec) AllItemsReferences() (result []string) { + for _, v := range s.references.items { + result = append(result, v.String()) + } + + return +} + +// AllReferences returns all the references found in the document, with possible duplicates +func (s *Spec) AllReferences() (result []string) { + for _, v := range s.references.allRefs { + result = append(result, v.String()) + } + + return +} + +// AllRefs returns all the unique references found in the document +func (s *Spec) AllRefs() (result []spec.Ref) { + set := make(map[string]struct{}) + for _, v := range s.references.allRefs { + a := v.String() + if a == "" { + continue + } + + if _, ok := set[a]; !ok { + set[a] = struct{}{} + result = append(result, v) + } + } + + return +} + +func cloneStringMap(source map[string]string) map[string]string { + res := make(map[string]string, len(source)) + for k, v := range source { + res[k] = v + } + + return res +} + +func cloneEnumMap(source map[string][]interface{}) map[string][]interface{} { + res := make(map[string][]interface{}, len(source)) + for k, v := range source { + res[k] = v + } + + return res +} + +// ParameterPatterns returns all the patterns found in parameters +// the map is cloned to avoid accidental changes +func (s *Spec) ParameterPatterns() map[string]string { + return cloneStringMap(s.patterns.parameters) +} + +// HeaderPatterns returns all the patterns found in response headers +// the map is cloned to avoid accidental changes +func (s *Spec) HeaderPatterns() map[string]string { + return cloneStringMap(s.patterns.headers) +} + +// ItemsPatterns returns all the patterns found in simple array items +// the map is cloned to avoid accidental changes +func (s *Spec) ItemsPatterns() map[string]string { + return cloneStringMap(s.patterns.items) +} + +// SchemaPatterns returns all the patterns found in schemas +// the map is cloned to avoid accidental changes +func (s *Spec) SchemaPatterns() map[string]string { + return cloneStringMap(s.patterns.schemas) +} + +// AllPatterns returns all the patterns found in the spec +// the map is cloned to avoid accidental changes +func (s *Spec) AllPatterns() map[string]string { + return cloneStringMap(s.patterns.allPatterns) +} + +// ParameterEnums returns all the enums found in parameters +// the map is cloned to avoid accidental changes +func (s *Spec) ParameterEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.parameters) +} + +// HeaderEnums returns all the enums found in response headers +// the map is cloned to avoid accidental changes +func (s *Spec) HeaderEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.headers) +} + +// ItemsEnums returns all the enums found in simple array items +// the map is cloned to avoid accidental changes +func (s *Spec) ItemsEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.items) +} + +// SchemaEnums returns all the enums found in schemas +// the map is cloned to avoid accidental changes +func (s *Spec) SchemaEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.schemas) +} + +// AllEnums returns all the enums found in the spec +// the map is cloned to avoid accidental changes +func (s *Spec) AllEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.allEnums) +} diff --git a/vendor/github.com/go-openapi/analysis/debug.go b/vendor/github.com/go-openapi/analysis/debug.go new file mode 100644 index 0000000000..33c15704ec --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/debug.go @@ -0,0 +1,23 @@ +// Copyright 2015 go-swagger maintainers 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package analysis + +import ( + "os" + + "github.com/go-openapi/analysis/internal/debug" +) + +var debugLog = debug.GetLogger("analysis", os.Getenv("SWAGGER_DEBUG") != "") diff --git a/vendor/github.com/go-openapi/analysis/doc.go b/vendor/github.com/go-openapi/analysis/doc.go new file mode 100644 index 0000000000..e8d9f9b131 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/doc.go @@ -0,0 +1,43 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package analysis provides methods to work with a Swagger specification document from +package go-openapi/spec. + +## Analyzing a specification + +An analysed specification object (type Spec) provides methods to work with a swagger definition. + +## Flattening or expanding a specification + +Flattening a specification bundles all remote $ref in the main spec document. +Depending on flattening options, additional preprocessing may take place: + - full flattening: replacing all inline complex constructs by a named entry in #/definitions + - expand: replace all $ref's in the document by their expanded content + +## Merging several specifications + +Mixing in several specifications merges all Swagger constructs, and warns about any conflicts found. + +## Fixing a specification + +Unmarshalling a specification with golang json unmarshalling may lead to +some unwanted results on present but empty fields. + +## Analyzing a Swagger schema + +Swagger schemas are analyzed to determine their complexity and qualify their content. +*/ +package analysis diff --git a/vendor/github.com/go-openapi/analysis/fixer.go b/vendor/github.com/go-openapi/analysis/fixer.go new file mode 100644 index 0000000000..7c2ca08416 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/fixer.go @@ -0,0 +1,79 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package analysis + +import "github.com/go-openapi/spec" + +// FixEmptyResponseDescriptions replaces empty ("") response +// descriptions in the input with "(empty)" to ensure that the +// resulting Swagger stays valid. The problem appears to arise +// from reading in valid specs that have an explicit response +// description of "" (valid, response.description is required), but +// due to zero values being omitted upon re-serializing (omitempty) we +// lose them unless we stick some chars in there. +func FixEmptyResponseDescriptions(s *spec.Swagger) { + for k, v := range s.Responses { + FixEmptyDesc(&v) //#nosec + s.Responses[k] = v + } + + if s.Paths == nil { + return + } + + for _, v := range s.Paths.Paths { + if v.Get != nil { + FixEmptyDescs(v.Get.Responses) + } + if v.Put != nil { + FixEmptyDescs(v.Put.Responses) + } + if v.Post != nil { + FixEmptyDescs(v.Post.Responses) + } + if v.Delete != nil { + FixEmptyDescs(v.Delete.Responses) + } + if v.Options != nil { + FixEmptyDescs(v.Options.Responses) + } + if v.Head != nil { + FixEmptyDescs(v.Head.Responses) + } + if v.Patch != nil { + FixEmptyDescs(v.Patch.Responses) + } + } +} + +// FixEmptyDescs adds "(empty)" as the description for any Response in +// the given Responses object that doesn't already have one. +func FixEmptyDescs(rs *spec.Responses) { + FixEmptyDesc(rs.Default) + for k, v := range rs.StatusCodeResponses { + FixEmptyDesc(&v) //#nosec + rs.StatusCodeResponses[k] = v + } +} + +// FixEmptyDesc adds "(empty)" as the description to the given +// Response object if it doesn't already have one and isn't a +// ref. No-op on nil input. +func FixEmptyDesc(rs *spec.Response) { + if rs == nil || rs.Description != "" || rs.Ref.Ref.GetURL() != nil { + return + } + rs.Description = "(empty)" +} diff --git a/vendor/github.com/go-openapi/analysis/flatten.go b/vendor/github.com/go-openapi/analysis/flatten.go new file mode 100644 index 0000000000..ebedcc9df3 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/flatten.go @@ -0,0 +1,814 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package analysis + +import ( + "fmt" + "log" + "path" + "sort" + "strings" + + "github.com/go-openapi/analysis/internal/flatten/normalize" + "github.com/go-openapi/analysis/internal/flatten/operations" + "github.com/go-openapi/analysis/internal/flatten/replace" + "github.com/go-openapi/analysis/internal/flatten/schutils" + "github.com/go-openapi/analysis/internal/flatten/sortref" + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/spec" +) + +const definitionsPath = "#/definitions" + +// newRef stores information about refs created during the flattening process +type newRef struct { + key string + newName string + path string + isOAIGen bool + resolved bool + schema *spec.Schema + parents []string +} + +// context stores intermediary results from flatten +type context struct { + newRefs map[string]*newRef + warnings []string + resolved map[string]string +} + +func newContext() *context { + return &context{ + newRefs: make(map[string]*newRef, 150), + warnings: make([]string, 0), + resolved: make(map[string]string, 50), + } +} + +// Flatten an analyzed spec and produce a self-contained spec bundle. +// +// There is a minimal and a full flattening mode. +// +// Minimally flattening a spec means: +// - Expanding parameters, responses, path items, parameter items and header items (references to schemas are left +// unscathed) +// - Importing external (http, file) references so they become internal to the document +// - Moving every JSON pointer to a $ref to a named definition (i.e. the reworked spec does not contain pointers +// like "$ref": "#/definitions/myObject/allOfs/1") +// +// A minimally flattened spec thus guarantees the following properties: +// - all $refs point to a local definition (i.e. '#/definitions/...') +// - definitions are unique +// +// NOTE: arbitrary JSON pointers (other than $refs to top level definitions) are rewritten as definitions if they +// represent a complex schema or express commonality in the spec. +// Otherwise, they are simply expanded. +// Self-referencing JSON pointers cannot resolve to a type and trigger an error. +// +// Minimal flattening is necessary and sufficient for codegen rendering using go-swagger. +// +// Fully flattening a spec means: +// - Moving every complex inline schema to be a definition with an auto-generated name in a depth-first fashion. +// +// By complex, we mean every JSON object with some properties. +// Arrays, when they do not define a tuple, +// or empty objects with or without additionalProperties, are not considered complex and remain inline. +// +// NOTE: rewritten schemas get a vendor extension x-go-gen-location so we know from which part of the spec definitions +// have been created. +// +// Available flattening options: +// - Minimal: stops flattening after minimal $ref processing, leaving schema constructs untouched +// - Expand: expand all $ref's in the document (inoperative if Minimal is set to true) +// - Verbose: croaks about name conflicts detected +// - RemoveUnused: removes unused parameters, responses and definitions after expansion/flattening +// +// NOTE: expansion removes all $ref save circular $ref, which remain in place +// +// TODO: additional options +// - PropagateNameExtensions: ensure that created entries properly follow naming rules when their parent has set an +// x-go-name extension +// - LiftAllOfs: +// - limit the flattening of allOf members when simple objects +// - merge allOf with validation only +// - merge allOf with extensions only +// - ...
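+// +// A minimal invocation, as an illustrative sketch only (an is an analyzed spec +// obtained from New, and basePath is the document's base path; both are assumed here): +// +//	err := Flatten(FlattenOpts{ +//		Spec:     an, +//		BasePath: basePath, +//		Minimal:  true, +//	})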
+func Flatten(opts FlattenOpts) error { + debugLog("FlattenOpts: %#v", opts) + + opts.flattenContext = newContext() + + // 1. Recursively expand responses, parameters, path items and items in simple schemas. + // + // This simplifies the spec and leaves only the $ref's in schema objects. + if err := expand(&opts); err != nil { + return err + } + + // 2. Strip the current document from absolute $ref's that are actually in the root, + // so we can recognize them as proper definitions + // + // In particular, this works around issue go-openapi/spec#76: leading absolute file in $ref is stripped + if err := normalizeRef(&opts); err != nil { + return err + } + + // 3. Optionally remove shared parameters and responses already expanded (now unused). + // + // Operation parameters (i.e. under paths) remain. + if opts.RemoveUnused { + removeUnusedShared(&opts) + } + + // 4. Import all remote references. + if err := importReferences(&opts); err != nil { + return err + } + + // 5. Full flattening: rewrite inline schemas (schemas that aren't simple types or arrays or maps) + if !opts.Minimal && !opts.Expand { + if err := nameInlinedSchemas(&opts); err != nil { + return err + } + } + + // 6. Rewrite JSON pointers other than $ref to named definitions + // and attempt to resolve conflicting names whenever possible. + if err := stripPointersAndOAIGen(&opts); err != nil { + return err + } + + // 7. Strip the spec from unused definitions + if opts.RemoveUnused { + removeUnused(&opts) + } + + // 8. Issue warning notifications, if any + opts.croak() + + // TODO: simplify known schema patterns to flat objects with properties + // examples: + // - lift simple allOf object, + // - empty allOf with validation only or extensions only + // - rework allOf arrays + // - rework allOf additionalProperties + + return nil +} + +func expand(opts *FlattenOpts) error { + if err := spec.ExpandSpec(opts.Swagger(), opts.ExpandOpts(!opts.Expand)); err != nil { + return err + } + + opts.Spec.reload() // re-analyze + + return nil +} + +// normalizeRef strips the current file from any absolute file $ref. This works around issue go-openapi/spec#76: +// leading absolute file in $ref is stripped +func normalizeRef(opts *FlattenOpts) error { + debugLog("normalizeRef") + + altered := false + for k, w := range opts.Spec.references.allRefs { + if !strings.HasPrefix(w.String(), opts.BasePath+definitionsPath) { // may be a mix of / and \, depending on OS + continue + } + + altered = true + debugLog("stripping absolute path for: %s", w.String()) + + // strip the base path from definition + if err := replace.UpdateRef(opts.Swagger(), k, + spec.MustCreateRef(path.Join(definitionsPath, path.Base(w.String())))); err != nil { + return err + } + } + + if altered { + opts.Spec.reload() // re-analyze + } + + return nil +} + +func removeUnusedShared(opts *FlattenOpts) { + opts.Swagger().Parameters = nil + opts.Swagger().Responses = nil + + opts.Spec.reload() // re-analyze +} + +func importReferences(opts *FlattenOpts) error { + var ( + imported bool + err error + ) + + for !imported && err == nil { + // iteratively import remote references until none left. + // This inlining deals with name conflicts by introducing auto-generated names ("OAIGen") + imported, err = importExternalReferences(opts) + + opts.Spec.reload() // re-analyze + } + + return err +} + +// nameInlinedSchemas replaces every complex inline construct by a named definition.
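+// +// For instance, a complex schema inlined at a pointer such as +// "#/paths/~1pets/post/parameters/0/schema" typically becomes a named definition +// (e.g. "#/definitions/postPetsParamsBody"); the exact name depends on the namer +// and on conflict resolution, so the names shown here are illustrative only.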
+func nameInlinedSchemas(opts *FlattenOpts) error { + debugLog("nameInlinedSchemas") + + namer := &InlineSchemaNamer{ + Spec: opts.Swagger(), + Operations: operations.AllOpRefsByRef(opts.Spec, nil), + flattenContext: opts.flattenContext, + opts: opts, + } + + depthFirst := sortref.DepthFirst(opts.Spec.allSchemas) + for _, key := range depthFirst { + sch := opts.Spec.allSchemas[key] + if sch.Schema == nil || sch.Schema.Ref.String() != "" || sch.TopLevel { + continue + } + + asch, err := Schema(SchemaOpts{Schema: sch.Schema, Root: opts.Swagger(), BasePath: opts.BasePath}) + if err != nil { + return fmt.Errorf("schema analysis [%s]: %w", key, err) + } + + if asch.isAnalyzedAsComplex() { // move complex schemas to definitions + if err := namer.Name(key, sch.Schema, asch); err != nil { + return err + } + } + } + + opts.Spec.reload() // re-analyze + + return nil +} + +func removeUnused(opts *FlattenOpts) { + for removeUnusedSinglePass(opts) { + // continue until no unused definition remains + } +} + +func removeUnusedSinglePass(opts *FlattenOpts) (hasRemoved bool) { + expected := make(map[string]struct{}) + for k := range opts.Swagger().Definitions { + expected[path.Join(definitionsPath, jsonpointer.Escape(k))] = struct{}{} + } + + for _, k := range opts.Spec.AllDefinitionReferences() { + delete(expected, k) + } + + for k := range expected { + hasRemoved = true + debugLog("removing unused definition %s", path.Base(k)) + if opts.Verbose { + log.Printf("info: removing unused definition: %s", path.Base(k)) + } + delete(opts.Swagger().Definitions, path.Base(k)) + } + + opts.Spec.reload() // re-analyze + + return hasRemoved +} + +func importKnownRef(entry sortref.RefRevIdx, refStr, newName string, opts *FlattenOpts) error { + // rewrite ref with already resolved external ref (useful for cyclical refs): + // rewrite external refs to local ones + debugLog("resolving known ref [%s] to %s", refStr, newName) + + for _, key := range entry.Keys { + if err := replace.UpdateRef(opts.Swagger(), key, spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil { + return err + } + } + + return nil +} + +func importNewRef(entry sortref.RefRevIdx, refStr string, opts *FlattenOpts) error { + var ( + isOAIGen bool + newName string + ) + + debugLog("resolving schema from remote $ref [%s]", refStr) + + sch, err := spec.ResolveRefWithBase(opts.Swagger(), &entry.Ref, opts.ExpandOpts(false)) + if err != nil { + return fmt.Errorf("could not resolve schema: %w", err) + } + + // at this stage only $ref analysis matters + partialAnalyzer := &Spec{ + references: referenceAnalysis{}, + patterns: patternAnalysis{}, + enums: enumAnalysis{}, + } + partialAnalyzer.reset() + partialAnalyzer.analyzeSchema("", sch, "/") + + // now rewrite those refs with rebase + for key, ref := range partialAnalyzer.references.allRefs { + if err := replace.UpdateRef(sch, key, spec.MustCreateRef(normalize.RebaseRef(entry.Ref.String(), ref.String()))); err != nil { + return fmt.Errorf("failed to rewrite ref for key %q at %s: %w", key, entry.Ref.String(), err) + } + } + + // generate a unique name - isOAIGen means that a naming conflict was resolved by changing the name + newName, isOAIGen = uniqifyName(opts.Swagger().Definitions, nameFromRef(entry.Ref, opts)) + debugLog("new name for [%s]: %s - with name conflict:%t", strings.Join(entry.Keys, ", "), newName, isOAIGen) + + opts.flattenContext.resolved[refStr] = newName + + // rewrite the external refs to local ones + for _, key := range entry.Keys { + if err := 
replace.UpdateRef(opts.Swagger(), key, + spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil { + return err + } + + // keep track of created refs + resolved := false + if _, ok := opts.flattenContext.newRefs[key]; ok { + resolved = opts.flattenContext.newRefs[key].resolved + } + + debugLog("keeping track of ref: %s (%s), resolved: %t", key, newName, resolved) + opts.flattenContext.newRefs[key] = &newRef{ + key: key, + newName: newName, + path: path.Join(definitionsPath, newName), + isOAIGen: isOAIGen, + resolved: resolved, + schema: sch, + } + } + + // add the resolved schema to the definitions + schutils.Save(opts.Swagger(), newName, sch) + + return nil +} + +// importExternalReferences iteratively digs remote references and imports them into the main schema. +// +// At every iteration, new remotes may be found when digging deeper: they are rebased to the current schema before being imported. +// +// This returns true when no more remote references can be found. +func importExternalReferences(opts *FlattenOpts) (bool, error) { + debugLog("importExternalReferences") + + groupedRefs := sortref.ReverseIndex(opts.Spec.references.schemas, opts.BasePath) + sortedRefStr := make([]string, 0, len(groupedRefs)) + if opts.flattenContext == nil { + opts.flattenContext = newContext() + } + + // sort $ref resolution to ensure deterministic name conflict resolution + for refStr := range groupedRefs { + sortedRefStr = append(sortedRefStr, refStr) + } + sort.Strings(sortedRefStr) + + complete := true + + for _, refStr := range sortedRefStr { + entry := groupedRefs[refStr] + if entry.Ref.HasFragmentOnly { + continue + } + + complete = false + + newName := opts.flattenContext.resolved[refStr] + if newName != "" { + if err := importKnownRef(entry, refStr, newName, opts); err != nil { + return false, err + } + + continue + } + + // resolve schemas + if err := importNewRef(entry, refStr, opts); err != nil { + return false, err + } + } + + // maintains ref index entries + for k := range opts.flattenContext.newRefs { + r := opts.flattenContext.newRefs[k] + + // update tracking with resolved schemas + if r.schema.Ref.String() != "" { + ref := spec.MustCreateRef(r.path) + sch, err := spec.ResolveRefWithBase(opts.Swagger(), &ref, opts.ExpandOpts(false)) + if err != nil { + return false, fmt.Errorf("could not resolve schema: %w", err) + } + + r.schema = sch + } + + if r.path == k { + continue + } + + // update tracking with renamed keys: got a cascade of refs + renamed := *r + renamed.key = r.path + opts.flattenContext.newRefs[renamed.path] = &renamed + + // indirect ref + r.newName = path.Base(k) + r.schema = spec.RefSchema(r.path) + r.path = k + r.isOAIGen = strings.Contains(k, "OAIGen") + } + + return complete, nil +} + +// stripPointersAndOAIGen removes anonymous JSON pointers from spec and chain with name conflicts handler. +// This loops until the spec has no such pointer and all name conflicts have been reduced as much as possible. 
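+// +// For example, a $ref to an arbitrary pointer such as +// "#/definitions/thing/properties/name" (illustrative) is either promoted to its +// own named definition or expanded in place, while "...OAIGen" entries created to +// work around name conflicts are merged back into their parents whenever possible.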
+func stripPointersAndOAIGen(opts *FlattenOpts) error { + // name all JSON pointers to anonymous documents + if err := namePointers(opts); err != nil { + return err + } + + // remove unnecessary OAIGen ref (created when flattening external refs creates name conflicts) + hasIntroducedPointerOrInline, ers := stripOAIGen(opts) + if ers != nil { + return ers + } + + // iterate as pointer or OAIGen resolution may introduce inline schemas or pointers + for hasIntroducedPointerOrInline { + if !opts.Minimal { + opts.Spec.reload() // re-analyze + if err := nameInlinedSchemas(opts); err != nil { + return err + } + } + + if err := namePointers(opts); err != nil { + return err + } + + // restrip and re-analyze + var err error + if hasIntroducedPointerOrInline, err = stripOAIGen(opts); err != nil { + return err + } + } + + return nil +} + +// stripOAIGen strips the spec from unnecessary OAIGen constructs, initially created to dedupe flattened definitions. +// +// A dedupe is deemed unnecessary whenever: +// - the only conflict is with its (single) parent: OAIGen is merged into its parent (reinlining) +// - there is a conflict with multiple parents: merge OAIGen into the first parent, then rewrite other parents to point to +// the first parent. +// +// This function returns true whenever it re-inlined a complex schema, so the caller may choose to iterate +// pointer and name resolution again. +func stripOAIGen(opts *FlattenOpts) (bool, error) { + debugLog("stripOAIGen") + replacedWithComplex := false + + // figure out referrers of OAIGen definitions (doing it before the refs start mutating) + for _, r := range opts.flattenContext.newRefs { + updateRefParents(opts.Spec.references.allRefs, r) + } + + for k := range opts.flattenContext.newRefs { + r := opts.flattenContext.newRefs[k] + debugLog("newRefs[%s]: isOAIGen: %t, resolved: %t, name: %s, path:%s, #parents: %d, parents: %v, ref: %s", + k, r.isOAIGen, r.resolved, r.newName, r.path, len(r.parents), r.parents, r.schema.Ref.String()) + + if !r.isOAIGen || len(r.parents) == 0 { + continue + } + + hasReplacedWithComplex, err := stripOAIGenForRef(opts, k, r) + if err != nil { + return replacedWithComplex, err + } + + replacedWithComplex = replacedWithComplex || hasReplacedWithComplex + } + + debugLog("replacedWithComplex: %t", replacedWithComplex) + opts.Spec.reload() // re-analyze + + return replacedWithComplex, nil +} + +// updateRefParents updates all parents of an updated $ref +func updateRefParents(allRefs map[string]spec.Ref, r *newRef) { + if !r.isOAIGen || r.resolved { // bail on already resolved entries (avoid looping) + return + } + for k, v := range allRefs { + if r.path != v.String() { + continue + } + + found := false + for _, p := range r.parents { + if p == k { + found = true + + break + } + } + if !found { + r.parents = append(r.parents, k) + } + } +} + +func stripOAIGenForRef(opts *FlattenOpts, k string, r *newRef) (bool, error) { + replacedWithComplex := false + + pr := sortref.TopmostFirst(r.parents) + + // rewrite first parent schema in hierarchical then lexicographical order + debugLog("rewrite first parent %s with schema", pr[0]) + if err := replace.UpdateRefWithSchema(opts.Swagger(), pr[0], r.schema); err != nil { + return false, err + } + + if pa, ok := opts.flattenContext.newRefs[pr[0]]; ok && pa.isOAIGen { + // update parent in ref index entry + debugLog("update parent entry: %s", pr[0]) + pa.schema = r.schema + pa.resolved = false + replacedWithComplex = true + } + + // rewrite other parents to point to first parent + if len(pr) > 1 { + for
_, p := range pr[1:] { + replacingRef := spec.MustCreateRef(pr[0]) + + // set complex when replacing ref is an anonymous jsonpointer: further processing may be required + replacedWithComplex = replacedWithComplex || path.Dir(replacingRef.String()) != definitionsPath + debugLog("rewrite parent with ref: %s", replacingRef.String()) + + // NOTE: it is possible at this stage to introduce json pointers (to non-definitions places). + // Those are stripped later on. + if err := replace.UpdateRef(opts.Swagger(), p, replacingRef); err != nil { + return false, err + } + + if pa, ok := opts.flattenContext.newRefs[p]; ok && pa.isOAIGen { + // update parent in ref index + debugLog("update parent entry: %s", p) + pa.schema = r.schema + pa.resolved = false + replacedWithComplex = true + } + } + } + + // remove OAIGen definition + debugLog("removing definition %s", path.Base(r.path)) + delete(opts.Swagger().Definitions, path.Base(r.path)) + + // propagate changes in ref index for keys which have this one as a parent + for kk, value := range opts.flattenContext.newRefs { + if kk == k || !value.isOAIGen || value.resolved { + continue + } + + found := false + newParents := make([]string, 0, len(value.parents)) + for _, parent := range value.parents { + switch { + case parent == r.path: + found = true + parent = pr[0] + case strings.HasPrefix(parent, r.path+"/"): + found = true + parent = path.Join(pr[0], strings.TrimPrefix(parent, r.path)) + } + + newParents = append(newParents, parent) + } + + if found { + value.parents = newParents + } + } + + // mark naming conflict as resolved + debugLog("marking naming conflict resolved for key: %s", r.key) + opts.flattenContext.newRefs[r.key].isOAIGen = false + opts.flattenContext.newRefs[r.key].resolved = true + + // determine if the previous substitution did inline a complex schema + if r.schema != nil && r.schema.Ref.String() == "" { // inline schema + asch, err := Schema(SchemaOpts{Schema: r.schema, Root: opts.Swagger(), BasePath: opts.BasePath}) + if err != nil { + return false, err + } + + debugLog("re-inlined schema: parent: %s, %t", pr[0], asch.isAnalyzedAsComplex()) + replacedWithComplex = replacedWithComplex || !(path.Dir(pr[0]) == definitionsPath) && asch.isAnalyzedAsComplex() + } + + return replacedWithComplex, nil +} + +// namePointers replaces all JSON pointers to anonymous documents by a $ref to a new named definitions. +// +// This is carried on depth-first. Pointers to $refs which are top level definitions are replaced by the $ref itself. +// Pointers to simple types are expanded, unless they express commonality (i.e. several such $ref are used). +func namePointers(opts *FlattenOpts) error { + debugLog("name pointers") + + refsToReplace := make(map[string]SchemaRef, len(opts.Spec.references.schemas)) + for k, ref := range opts.Spec.references.allRefs { + debugLog("name pointers: %q => %#v", k, ref) + if path.Dir(ref.String()) == definitionsPath { + // this a ref to a top-level definition: ok + continue + } + + result, err := replace.DeepestRef(opts.Swagger(), opts.ExpandOpts(false), ref) + if err != nil { + return fmt.Errorf("at %s, %w", k, err) + } + + replacingRef := result.Ref + sch := result.Schema + if opts.flattenContext != nil { + opts.flattenContext.warnings = append(opts.flattenContext.warnings, result.Warnings...) 
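+ // NOTE: warnings accumulated here are issued at the end of the flattening pass (see opts.croak in Flatten).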
+ } + + debugLog("planning pointer to replace at %s: %s, resolved to: %s", k, ref.String(), replacingRef.String()) + refsToReplace[k] = SchemaRef{ + Name: k, // caller + Ref: replacingRef, // called + Schema: sch, + TopLevel: path.Dir(replacingRef.String()) == definitionsPath, + } + } + + depthFirst := sortref.DepthFirst(refsToReplace) + namer := &InlineSchemaNamer{ + Spec: opts.Swagger(), + Operations: operations.AllOpRefsByRef(opts.Spec, nil), + flattenContext: opts.flattenContext, + opts: opts, + } + + for _, key := range depthFirst { + v := refsToReplace[key] + // update current replacement, which may have been updated by previous changes of deeper elements + result, erd := replace.DeepestRef(opts.Swagger(), opts.ExpandOpts(false), v.Ref) + if erd != nil { + return fmt.Errorf("at %s, %w", key, erd) + } + + if opts.flattenContext != nil { + opts.flattenContext.warnings = append(opts.flattenContext.warnings, result.Warnings...) + } + + v.Ref = result.Ref + v.Schema = result.Schema + v.TopLevel = path.Dir(result.Ref.String()) == definitionsPath + debugLog("replacing pointer at %s: resolved to: %s", key, v.Ref.String()) + + if v.TopLevel { + debugLog("replace pointer %s by canonical definition: %s", key, v.Ref.String()) + + // if the schema is a $ref to a top level definition, just rewrite the pointer to this $ref + if err := replace.UpdateRef(opts.Swagger(), key, v.Ref); err != nil { + return err + } + + continue + } + + if err := flattenAnonPointer(key, v, refsToReplace, namer, opts); err != nil { + return err + } + } + + opts.Spec.reload() // re-analyze + + return nil +} + +func flattenAnonPointer(key string, v SchemaRef, refsToReplace map[string]SchemaRef, namer *InlineSchemaNamer, opts *FlattenOpts) error { + // this is a JSON pointer to an anonymous document (internal or external): + // create a definition for this schema when: + // - it is a complex schema + // - or it is pointed by more than one $ref (i.e. expresses commonality) + // otherwise, expand the pointer (single reference to a simple type) + // + // The named definition for this follows the target's key, not the caller's + debugLog("namePointers at %s for %s", key, v.Ref.String()) + + // qualify the expanded schema + asch, ers := Schema(SchemaOpts{Schema: v.Schema, Root: opts.Swagger(), BasePath: opts.BasePath}) + if ers != nil { + return fmt.Errorf("schema analysis [%s]: %w", key, ers) + } + callers := make([]string, 0, 64) + + debugLog("looking for callers") + + an := New(opts.Swagger()) + for k, w := range an.references.allRefs { + r, err := replace.DeepestRef(opts.Swagger(), opts.ExpandOpts(false), w) + if err != nil { + return fmt.Errorf("at %s, %w", key, err) + } + + if opts.flattenContext != nil { + opts.flattenContext.warnings = append(opts.flattenContext.warnings, r.Warnings...) 
+ } + + if r.Ref.String() == v.Ref.String() { + callers = append(callers, k) + } + } + + debugLog("callers for %s: %d", v.Ref.String(), len(callers)) + if len(callers) == 0 { + // has already been updated and resolved + return nil + } + + parts := sortref.KeyParts(v.Ref.String()) + debugLog("number of callers for %s: %d", v.Ref.String(), len(callers)) + + // identifying edge case when the namer did nothing because we point to a non-schema object + // no definition is created and we expand the $ref for all callers + debugLog("decide what to do with the schema pointed to: asch.IsSimpleSchema=%t, len(callers)=%d, parts.IsSharedParam=%t, parts.IsSharedResponse=%t", + asch.IsSimpleSchema, len(callers), parts.IsSharedParam(), parts.IsSharedResponse(), + ) + + if (!asch.IsSimpleSchema || len(callers) > 1) && !parts.IsSharedParam() && !parts.IsSharedResponse() { + debugLog("replace JSON pointer at [%s] by definition: %s", key, v.Ref.String()) + if err := namer.Name(v.Ref.String(), v.Schema, asch); err != nil { + return err + } + + // regular case: we named the $ref as a definition, and we move all callers to this new $ref + for _, caller := range callers { + if caller == key { + continue + } + + // move $ref for next to resolve + debugLog("identified caller of %s at [%s]", v.Ref.String(), caller) + c := refsToReplace[caller] + c.Ref = v.Ref + refsToReplace[caller] = c + } + + return nil + } + + // everything that is a simple schema and not factorizable is expanded + debugLog("expand JSON pointer for key=%s", key) + + if err := replace.UpdateRefWithSchema(opts.Swagger(), key, v.Schema); err != nil { + return err + } + // NOTE: there is no other caller to update + + return nil +} diff --git a/vendor/github.com/go-openapi/analysis/flatten_name.go b/vendor/github.com/go-openapi/analysis/flatten_name.go new file mode 100644 index 0000000000..c7d7938ebe --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/flatten_name.go @@ -0,0 +1,308 @@ +package analysis + +import ( + "fmt" + "path" + "sort" + "strings" + + "github.com/go-openapi/analysis/internal/flatten/operations" + "github.com/go-openapi/analysis/internal/flatten/replace" + "github.com/go-openapi/analysis/internal/flatten/schutils" + "github.com/go-openapi/analysis/internal/flatten/sortref" + "github.com/go-openapi/spec" + "github.com/go-openapi/swag" +) + +// InlineSchemaNamer finds a new name for an inlined type +type InlineSchemaNamer struct { + Spec *spec.Swagger + Operations map[string]operations.OpRef + flattenContext *context + opts *FlattenOpts +} + +// Name yields a new name for the inline schema +func (isn *InlineSchemaNamer) Name(key string, schema *spec.Schema, aschema *AnalyzedSchema) error { + debugLog("naming inlined schema at %s", key) + + parts := sortref.KeyParts(key) + for _, name := range namesFromKey(parts, aschema, isn.Operations) { + if name == "" { + continue + } + + // create unique name + mangle := mangler(isn.opts) + newName, isOAIGen := uniqifyName(isn.Spec.Definitions, mangle(name)) + + // clone schema + sch := schutils.Clone(schema) + + // replace values on schema + debugLog("rewriting schema to ref: key=%s with new name: %s", key, newName) + if err := replace.RewriteSchemaToRef(isn.Spec, key, + spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil { + return fmt.Errorf("error while creating definition %q from inline schema: %w", newName, err) + } + + // rewrite any dependent $ref pointing to this place, + // when not already pointing to a top-level definition. 
+ //
+ // NOTE: this is important if such referrers use arbitrary JSON pointers.
+ an := New(isn.Spec)
+ for k, v := range an.references.allRefs {
+ r, erd := replace.DeepestRef(isn.opts.Swagger(), isn.opts.ExpandOpts(false), v)
+ if erd != nil {
+ return fmt.Errorf("at %s, %w", k, erd)
+ }
+
+ if isn.opts.flattenContext != nil {
+ isn.opts.flattenContext.warnings = append(isn.opts.flattenContext.warnings, r.Warnings...)
+ }
+
+ if r.Ref.String() != key && (r.Ref.String() != path.Join(definitionsPath, newName) || path.Dir(v.String()) == definitionsPath) {
+ continue
+ }
+
+ debugLog("found a $ref to a rewritten schema: %s points to %s", k, v.String())
+
+ // rewrite $ref to the new target
+ if err := replace.UpdateRef(isn.Spec, k,
+ spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil {
+ return err
+ }
+ }
+
+ // NOTE: this extension is currently not used by go-swagger (provided for information only)
+ sch.AddExtension("x-go-gen-location", GenLocation(parts))
+
+ // save cloned schema to definitions
+ schutils.Save(isn.Spec, newName, sch)
+
+ // keep track of created refs
+ if isn.flattenContext == nil {
+ continue
+ }
+
+ debugLog("track created ref: key=%s, newName=%s, isOAIGen=%t", key, newName, isOAIGen)
+ resolved := false
+
+ if _, ok := isn.flattenContext.newRefs[key]; ok {
+ resolved = isn.flattenContext.newRefs[key].resolved
+ }
+
+ isn.flattenContext.newRefs[key] = &newRef{
+ key: key,
+ newName: newName,
+ path: path.Join(definitionsPath, newName),
+ isOAIGen: isOAIGen,
+ resolved: resolved,
+ schema: sch,
+ }
+ }
+
+ return nil
+}
+
+// uniqifyName yields a unique name for a definition
+func uniqifyName(definitions spec.Definitions, name string) (string, bool) {
+ isOAIGen := false
+ if name == "" {
+ name = "oaiGen"
+ isOAIGen = true
+ }
+
+ if len(definitions) == 0 {
+ return name, isOAIGen
+ }
+
+ unq := true
+ for k := range definitions {
+ if strings.EqualFold(k, name) {
+ unq = false
+
+ break
+ }
+ }
+
+ if unq {
+ return name, isOAIGen
+ }
+
+ name += "OAIGen"
+ isOAIGen = true
+ var idx int
+ unique := name
+ _, known := definitions[unique]
+
+ for known {
+ idx++
+ unique = fmt.Sprintf("%s%d", name, idx)
+ _, known = definitions[unique]
+ }
+
+ return unique, isOAIGen
+}
+
+func namesFromKey(parts sortref.SplitKey, aschema *AnalyzedSchema, operations map[string]operations.OpRef) []string {
+ var (
+ baseNames [][]string
+ startIndex int
+ )
+
+ switch {
+ case parts.IsOperation():
+ baseNames, startIndex = namesForOperation(parts, operations)
+ case parts.IsDefinition():
+ baseNames, startIndex = namesForDefinition(parts)
+ default:
+ // this is a non-standard pointer: build a name by concatenating its parts
+ baseNames = [][]string{parts}
+ startIndex = len(baseNames) + 1
+ }
+
+ result := make([]string, 0, len(baseNames))
+ for _, segments := range baseNames {
+ nm := parts.BuildName(segments, startIndex, partAdder(aschema))
+ if nm == "" {
+ continue
+ }
+
+ result = append(result, nm)
+ }
+ sort.Strings(result)
+
+ debugLog("names from parts: %v => %v", parts, result)
+ return result
+}
+
+func namesForParam(parts sortref.SplitKey, operations map[string]operations.OpRef) ([][]string, int) {
+ var (
+ baseNames [][]string
+ startIndex int
+ )
+
+ piref := parts.PathItemRef()
+ if piref.String() != "" && parts.IsOperationParam() {
+ if op, ok := operations[piref.String()]; ok {
+ startIndex = 5
+ baseNames = append(baseNames, []string{op.ID, "params", "body"})
+ }
+ } else if parts.IsSharedOperationParam() {
+ pref := parts.PathRef()
+ for k, v := range
operations { + if strings.HasPrefix(k, pref.String()) { + startIndex = 4 + baseNames = append(baseNames, []string{v.ID, "params", "body"}) + } + } + } + + return baseNames, startIndex +} + +func namesForOperation(parts sortref.SplitKey, operations map[string]operations.OpRef) ([][]string, int) { + var ( + baseNames [][]string + startIndex int + ) + + // params + if parts.IsOperationParam() || parts.IsSharedOperationParam() { + baseNames, startIndex = namesForParam(parts, operations) + } + + // responses + if parts.IsOperationResponse() { + piref := parts.PathItemRef() + if piref.String() != "" { + if op, ok := operations[piref.String()]; ok { + startIndex = 6 + baseNames = append(baseNames, []string{op.ID, parts.ResponseName(), "body"}) + } + } + } + + return baseNames, startIndex +} + +func namesForDefinition(parts sortref.SplitKey) ([][]string, int) { + nm := parts.DefinitionName() + if nm != "" { + return [][]string{{parts.DefinitionName()}}, 2 + } + + return [][]string{}, 0 +} + +// partAdder knows how to interpret a schema when it comes to build a name from parts +func partAdder(aschema *AnalyzedSchema) sortref.PartAdder { + return func(part string) []string { + segments := make([]string, 0, 2) + + if part == "items" || part == "additionalItems" { + if aschema.IsTuple || aschema.IsTupleWithExtra { + segments = append(segments, "tuple") + } else { + segments = append(segments, "items") + } + + if part == "additionalItems" { + segments = append(segments, part) + } + + return segments + } + + segments = append(segments, part) + + return segments + } +} + +func mangler(o *FlattenOpts) func(string) string { + if o.KeepNames { + return func(in string) string { return in } + } + + return swag.ToJSONName +} + +func nameFromRef(ref spec.Ref, o *FlattenOpts) string { + mangle := mangler(o) + + u := ref.GetURL() + if u.Fragment != "" { + return mangle(path.Base(u.Fragment)) + } + + if u.Path != "" { + bn := path.Base(u.Path) + if bn != "" && bn != "/" { + ext := path.Ext(bn) + if ext != "" { + return mangle(bn[:len(bn)-len(ext)]) + } + + return mangle(bn) + } + } + + return mangle(strings.ReplaceAll(u.Host, ".", " ")) +} + +// GenLocation indicates from which section of the specification (models or operations) a definition has been created. +// +// This is reflected in the output spec with a "x-go-gen-location" extension. At the moment, this is provided +// for information only. +func GenLocation(parts sortref.SplitKey) string { + switch { + case parts.IsOperation(): + return "operations" + case parts.IsDefinition(): + return "models" + default: + return "" + } +} diff --git a/vendor/github.com/go-openapi/analysis/flatten_options.go b/vendor/github.com/go-openapi/analysis/flatten_options.go new file mode 100644 index 0000000000..c943fe1e84 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/flatten_options.go @@ -0,0 +1,79 @@ +package analysis + +import ( + "log" + + "github.com/go-openapi/spec" +) + +// FlattenOpts configuration for flattening a swagger specification. +// +// The BasePath parameter is used to locate remote relative $ref found in the specification. +// This path is a file: it points to the location of the root document and may be either a local +// file path or a URL. +// +// If none specified, relative references (e.g. "$ref": "folder/schema.yaml#/definitions/...") +// found in the spec are searched from the current working directory. 
+type FlattenOpts struct { + Spec *Spec // The analyzed spec to work with + flattenContext *context // Internal context to track flattening activity + + BasePath string // The location of the root document for this spec to resolve relative $ref + + // Flattening options + Expand bool // When true, skip flattening the spec and expand it instead (if Minimal is false) + Minimal bool // When true, do not decompose complex structures such as allOf + Verbose bool // enable some reporting on possible name conflicts detected + RemoveUnused bool // When true, remove unused parameters, responses and definitions after expansion/flattening + ContinueOnError bool // Continue when spec expansion issues are found + KeepNames bool // Do not attempt to jsonify names from references when flattening + + /* Extra keys */ + _ struct{} // require keys +} + +// ExpandOpts creates a spec.ExpandOptions to configure expanding a specification document. +func (f *FlattenOpts) ExpandOpts(skipSchemas bool) *spec.ExpandOptions { + return &spec.ExpandOptions{ + RelativeBase: f.BasePath, + SkipSchemas: skipSchemas, + ContinueOnError: f.ContinueOnError, + } +} + +// Swagger gets the swagger specification for this flatten operation +func (f *FlattenOpts) Swagger() *spec.Swagger { + return f.Spec.spec +} + +// croak logs notifications and warnings about valid, but possibly unwanted constructs resulting +// from flattening a spec +func (f *FlattenOpts) croak() { + if !f.Verbose { + return + } + + reported := make(map[string]bool, len(f.flattenContext.newRefs)) + for _, v := range f.Spec.references.allRefs { + // warns about duplicate handling + for _, r := range f.flattenContext.newRefs { + if r.isOAIGen && r.path == v.String() { + reported[r.newName] = true + } + } + } + + for k := range reported { + log.Printf("warning: duplicate flattened definition name resolved as %s", k) + } + + // warns about possible type mismatches + uniqueMsg := make(map[string]bool) + for _, msg := range f.flattenContext.warnings { + if _, ok := uniqueMsg[msg]; ok { + continue + } + log.Printf("warning: %s", msg) + uniqueMsg[msg] = true + } +} diff --git a/vendor/github.com/go-openapi/analysis/internal/debug/debug.go b/vendor/github.com/go-openapi/analysis/internal/debug/debug.go new file mode 100644 index 0000000000..39f55a97bf --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/internal/debug/debug.go @@ -0,0 +1,41 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
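For orientation, a typical call into this API might look like the sketch below, assuming the usual go-openapi/loads loading step; the input file name is hypothetical:

```go
package main

import (
	"log"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/loads"
)

func main() {
	doc, err := loads.Spec("swagger.yaml") // hypothetical input document
	if err != nil {
		log.Fatal(err)
	}

	// Minimal flattening: promote inline JSON pointers to named definitions,
	// but leave complex constructs such as allOf alone.
	if err := analysis.Flatten(analysis.FlattenOpts{
		Spec:     analysis.New(doc.Spec()),
		BasePath: doc.SpecFilePath(),
		Minimal:  true,
	}); err != nil {
		log.Fatal(err)
	}
}
```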
+ +package debug + +import ( + "fmt" + "log" + "os" + "path/filepath" + "runtime" +) + +var ( + output = os.Stdout +) + +// GetLogger provides a prefix debug logger +func GetLogger(prefix string, debug bool) func(string, ...interface{}) { + if debug { + logger := log.New(output, prefix+":", log.LstdFlags) + + return func(msg string, args ...interface{}) { + _, file1, pos1, _ := runtime.Caller(1) + logger.Printf("%s:%d: %s", filepath.Base(file1), pos1, fmt.Sprintf(msg, args...)) + } + } + + return func(_ string, _ ...interface{}) {} +} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go b/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go new file mode 100644 index 0000000000..8c9df0580d --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go @@ -0,0 +1,87 @@ +package normalize + +import ( + "net/url" + "path" + "path/filepath" + "strings" + + "github.com/go-openapi/spec" +) + +// RebaseRef rebases a remote ref relative to a base ref. +// +// NOTE: does not support JSONschema ID for $ref (we assume we are working with swagger specs here). +// +// NOTE(windows): +// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec) +// * "/ in paths may appear as escape sequences +func RebaseRef(baseRef string, ref string) string { + baseRef, _ = url.PathUnescape(baseRef) + ref, _ = url.PathUnescape(ref) + + if baseRef == "" || baseRef == "." || strings.HasPrefix(baseRef, "#") { + return ref + } + + parts := strings.Split(ref, "#") + + baseParts := strings.Split(baseRef, "#") + baseURL, _ := url.Parse(baseParts[0]) + if strings.HasPrefix(ref, "#") { + if baseURL.Host == "" { + return strings.Join([]string{baseParts[0], parts[1]}, "#") + } + + return strings.Join([]string{baseParts[0], parts[1]}, "#") + } + + refURL, _ := url.Parse(parts[0]) + if refURL.Host != "" || filepath.IsAbs(parts[0]) { + // not rebasing an absolute path + return ref + } + + // there is a relative path + var basePath string + if baseURL.Host != "" { + // when there is a host, standard URI rules apply (with "/") + baseURL.Path = path.Dir(baseURL.Path) + baseURL.Path = path.Join(baseURL.Path, "/"+parts[0]) + + return baseURL.String() + } + + // this is a local relative path + // basePart[0] and parts[0] are local filesystem directories/files + basePath = filepath.Dir(baseParts[0]) + relPath := filepath.Join(basePath, string(filepath.Separator)+parts[0]) + if len(parts) > 1 { + return strings.Join([]string{relPath, parts[1]}, "#") + } + + return relPath +} + +// Path renders absolute path on remote file refs +// +// NOTE(windows): +// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec) +// * "/ in paths may appear as escape sequences +func Path(ref spec.Ref, basePath string) string { + uri, _ := url.PathUnescape(ref.String()) + if ref.HasFragmentOnly || filepath.IsAbs(uri) { + return uri + } + + refURL, _ := url.Parse(uri) + if refURL.Host != "" { + return uri + } + + parts := strings.Split(uri, "#") + // BasePath, parts[0] are local filesystem directories, guaranteed to be absolute at this stage + parts[0] = filepath.Join(filepath.Dir(basePath), parts[0]) + + return strings.Join(parts, "#") +} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go b/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go new file mode 100644 index 0000000000..7f3a2b8717 --- /dev/null +++ 
b/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go @@ -0,0 +1,90 @@ +package operations + +import ( + "path" + "sort" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/spec" + "github.com/go-openapi/swag" +) + +// AllOpRefsByRef returns an index of sortable operations +func AllOpRefsByRef(specDoc Provider, operationIDs []string) map[string]OpRef { + return OpRefsByRef(GatherOperations(specDoc, operationIDs)) +} + +// OpRefsByRef indexes a map of sortable operations +func OpRefsByRef(oprefs map[string]OpRef) map[string]OpRef { + result := make(map[string]OpRef, len(oprefs)) + for _, v := range oprefs { + result[v.Ref.String()] = v + } + + return result +} + +// OpRef is an indexable, sortable operation +type OpRef struct { + Method string + Path string + Key string + ID string + Op *spec.Operation + Ref spec.Ref +} + +// OpRefs is a sortable collection of operations +type OpRefs []OpRef + +func (o OpRefs) Len() int { return len(o) } +func (o OpRefs) Swap(i, j int) { o[i], o[j] = o[j], o[i] } +func (o OpRefs) Less(i, j int) bool { return o[i].Key < o[j].Key } + +// Provider knows how to collect operations from a spec +type Provider interface { + Operations() map[string]map[string]*spec.Operation +} + +// GatherOperations builds a map of sorted operations from a spec +func GatherOperations(specDoc Provider, operationIDs []string) map[string]OpRef { + var oprefs OpRefs + + for method, pathItem := range specDoc.Operations() { + for pth, operation := range pathItem { + vv := *operation + oprefs = append(oprefs, OpRef{ + Key: swag.ToGoName(strings.ToLower(method) + " " + pth), + Method: method, + Path: pth, + ID: vv.ID, + Op: &vv, + Ref: spec.MustCreateRef("#" + path.Join("/paths", jsonpointer.Escape(pth), method)), + }) + } + } + + sort.Sort(oprefs) + + operations := make(map[string]OpRef) + for _, opr := range oprefs { + nm := opr.ID + if nm == "" { + nm = opr.Key + } + + oo, found := operations[nm] + if found && oo.Method != opr.Method && oo.Path != opr.Path { + nm = opr.Key + } + + if len(operationIDs) == 0 || swag.ContainsStrings(operationIDs, opr.ID) || swag.ContainsStrings(operationIDs, nm) { + opr.ID = nm + opr.Op.ID = nm + operations[nm] = opr + } + } + + return operations +} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go b/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go new file mode 100644 index 0000000000..c0f43e728a --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go @@ -0,0 +1,458 @@ +package replace + +import ( + "encoding/json" + "fmt" + "net/url" + "os" + "path" + "strconv" + + "github.com/go-openapi/analysis/internal/debug" + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/spec" +) + +const definitionsPath = "#/definitions" + +var debugLog = debug.GetLogger("analysis/flatten/replace", os.Getenv("SWAGGER_DEBUG") != "") + +// RewriteSchemaToRef replaces a schema with a Ref +func RewriteSchemaToRef(sp *spec.Swagger, key string, ref spec.Ref) error { + debugLog("rewriting schema to ref for %s with %s", key, ref.String()) + _, value, err := getPointerFromKey(sp, key) + if err != nil { + return err + } + + switch refable := value.(type) { + case *spec.Schema: + return rewriteParentRef(sp, key, ref) + + case spec.Schema: + return rewriteParentRef(sp, key, ref) + + case *spec.SchemaOrArray: + if refable.Schema != nil { + refable.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + } + + case 
*spec.SchemaOrBool: + if refable.Schema != nil { + refable.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + } + case map[string]interface{}: // this happens e.g. if a schema points to an extension unmarshaled as map[string]interface{} + return rewriteParentRef(sp, key, ref) + default: + return fmt.Errorf("no schema with ref found at %s for %T", key, value) + } + + return nil +} + +func rewriteParentRef(sp *spec.Swagger, key string, ref spec.Ref) error { + parent, entry, pvalue, err := getParentFromKey(sp, key) + if err != nil { + return err + } + + debugLog("rewriting holder for %T", pvalue) + switch container := pvalue.(type) { + case spec.Response: + if err := rewriteParentRef(sp, "#"+parent, ref); err != nil { + return err + } + + case *spec.Response: + container.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case *spec.Responses: + statusCode, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %w", key[1:], err) + } + resp := container.StatusCodeResponses[statusCode] + resp.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + container.StatusCodeResponses[statusCode] = resp + + case map[string]spec.Response: + resp := container[entry] + resp.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + container[entry] = resp + + case spec.Parameter: + if err := rewriteParentRef(sp, "#"+parent, ref); err != nil { + return err + } + + case map[string]spec.Parameter: + param := container[entry] + param.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + container[entry] = param + + case []spec.Parameter: + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %w", key[1:], err) + } + param := container[idx] + param.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + container[idx] = param + + case spec.Definitions: + container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case map[string]spec.Schema: + container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case []spec.Schema: + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %w", key[1:], err) + } + container[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case *spec.SchemaOrArray: + // NOTE: this is necessarily an array - otherwise, the parent would be *Schema + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %w", key[1:], err) + } + container.Schemas[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case spec.SchemaProperties: + container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case *interface{}: + *container = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema + + default: + return fmt.Errorf("unhandled parent schema rewrite %s (%T)", key, pvalue) + } + + return nil +} + +// getPointerFromKey retrieves the content of the JSON pointer "key" +func getPointerFromKey(sp interface{}, key string) (string, interface{}, error) { + switch sp.(type) { + case *spec.Schema: + case *spec.Swagger: + default: + panic("unexpected type used in getPointerFromKey") + } + if key == "#/" { + return "", sp, nil + } + // unescape chars in key, e.g. 
"{}" from path params + pth, _ := url.PathUnescape(key[1:]) + ptr, err := jsonpointer.New(pth) + if err != nil { + return "", nil, err + } + + value, _, err := ptr.Get(sp) + if err != nil { + debugLog("error when getting key: %s with path: %s", key, pth) + + return "", nil, err + } + + return pth, value, nil +} + +// getParentFromKey retrieves the container of the JSON pointer "key" +func getParentFromKey(sp interface{}, key string) (string, string, interface{}, error) { + switch sp.(type) { + case *spec.Schema: + case *spec.Swagger: + default: + panic("unexpected type used in getPointerFromKey") + } + // unescape chars in key, e.g. "{}" from path params + pth, _ := url.PathUnescape(key[1:]) + + parent, entry := path.Dir(pth), path.Base(pth) + debugLog("getting schema holder at: %s, with entry: %s", parent, entry) + + pptr, err := jsonpointer.New(parent) + if err != nil { + return "", "", nil, err + } + pvalue, _, err := pptr.Get(sp) + if err != nil { + return "", "", nil, fmt.Errorf("can't get parent for %s: %w", parent, err) + } + + return parent, entry, pvalue, nil +} + +// UpdateRef replaces a ref by another one +func UpdateRef(sp interface{}, key string, ref spec.Ref) error { + switch sp.(type) { + case *spec.Schema: + case *spec.Swagger: + default: + panic("unexpected type used in getPointerFromKey") + } + debugLog("updating ref for %s with %s", key, ref.String()) + pth, value, err := getPointerFromKey(sp, key) + if err != nil { + return err + } + + switch refable := value.(type) { + case *spec.Schema: + refable.Ref = ref + case *spec.SchemaOrArray: + if refable.Schema != nil { + refable.Schema.Ref = ref + } + case *spec.SchemaOrBool: + if refable.Schema != nil { + refable.Schema.Ref = ref + } + case spec.Schema: + debugLog("rewriting holder for %T", refable) + _, entry, pvalue, erp := getParentFromKey(sp, key) + if erp != nil { + return err + } + switch container := pvalue.(type) { + case spec.Definitions: + container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case map[string]spec.Schema: + container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case []spec.Schema: + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %w", pth, err) + } + container[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case *spec.SchemaOrArray: + // NOTE: this is necessarily an array - otherwise, the parent would be *Schema + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %w", pth, err) + } + container.Schemas[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case spec.SchemaProperties: + container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema + + default: + return fmt.Errorf("unhandled container type at %s: %T", key, value) + } + + default: + return fmt.Errorf("no schema with ref found at %s for %T", key, value) + } + + return nil +} + +// UpdateRefWithSchema replaces a ref with a schema (i.e. 
re-inline schema)
+func UpdateRefWithSchema(sp *spec.Swagger, key string, sch *spec.Schema) error {
+ debugLog("updating ref for %s with schema", key)
+ pth, value, err := getPointerFromKey(sp, key)
+ if err != nil {
+ return err
+ }
+
+ switch refable := value.(type) {
+ case *spec.Schema:
+ *refable = *sch
+ case spec.Schema:
+ _, entry, pvalue, erp := getParentFromKey(sp, key)
+ if erp != nil {
+ return erp
+ }
+ switch container := pvalue.(type) {
+ case spec.Definitions:
+ container[entry] = *sch
+
+ case map[string]spec.Schema:
+ container[entry] = *sch
+
+ case []spec.Schema:
+ idx, err := strconv.Atoi(entry)
+ if err != nil {
+ return fmt.Errorf("%s not a number: %w", pth, err)
+ }
+ container[idx] = *sch
+
+ case *spec.SchemaOrArray:
+ // NOTE: this is necessarily an array - otherwise, the parent would be *Schema
+ idx, err := strconv.Atoi(entry)
+ if err != nil {
+ return fmt.Errorf("%s not a number: %w", pth, err)
+ }
+ container.Schemas[idx] = *sch
+
+ case spec.SchemaProperties:
+ container[entry] = *sch
+
+ // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema
+
+ default:
+ return fmt.Errorf("unhandled type for parent of [%s]: %T", key, value)
+ }
+ case *spec.SchemaOrArray:
+ *refable.Schema = *sch
+ // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema
+ case *spec.SchemaOrBool:
+ *refable.Schema = *sch
+ default:
+ return fmt.Errorf("no schema with ref found at %s for %T", key, value)
+ }
+
+ return nil
+}
+
+// DeepestRefResult holds the results from DeepestRef analysis
+type DeepestRefResult struct {
+ Ref spec.Ref
+ Schema *spec.Schema
+ Warnings []string
+}
+
+// DeepestRef finds the first definition ref, from a cascade of nested refs which are not definitions.
+// - if no definition is found, returns the deepest ref.
+// - pointers to external files are expanded
+//
+// NOTE: all external $ref's are assumed to be already expanded at this stage.
+func DeepestRef(sp *spec.Swagger, opts *spec.ExpandOptions, ref spec.Ref) (*DeepestRefResult, error) {
+ if !ref.HasFragmentOnly {
+ // we found an external $ref, which is odd at this stage:
+ // do nothing on external $refs
+ return &DeepestRefResult{Ref: ref}, nil
+ }
+
+ currentRef := ref
+ visited := make(map[string]bool, 64)
+ warnings := make([]string, 0, 2)
+
+DOWNREF:
+ for currentRef.String() != "" {
+ if path.Dir(currentRef.String()) == definitionsPath {
+ // this is a top-level definition: stop here and return this ref
+ return &DeepestRefResult{Ref: currentRef}, nil
+ }
+
+ if _, beenThere := visited[currentRef.String()]; beenThere {
+ return nil,
+ fmt.Errorf("cannot resolve cyclic chain of pointers under %s", currentRef.String())
+ }
+
+ visited[currentRef.String()] = true
+ value, _, err := currentRef.GetPointer().Get(sp)
+ if err != nil {
+ return nil, err
+ }
+
+ switch refable := value.(type) {
+ case *spec.Schema:
+ if refable.Ref.String() == "" {
+ break DOWNREF
+ }
+ currentRef = refable.Ref
+
+ case spec.Schema:
+ if refable.Ref.String() == "" {
+ break DOWNREF
+ }
+ currentRef = refable.Ref
+
+ case *spec.SchemaOrArray:
+ if refable.Schema == nil || refable.Schema != nil && refable.Schema.Ref.String() == "" {
+ break DOWNREF
+ }
+ currentRef = refable.Schema.Ref
+
+ case *spec.SchemaOrBool:
+ if refable.Schema == nil || refable.Schema != nil && refable.Schema.Ref.String() == "" {
+ break DOWNREF
+ }
+ currentRef = refable.Schema.Ref
+
+ case spec.Response:
+ // a pointer points to a schema initially marshalled in responses section...
+
+ // Attempt to convert this to a schema. If this fails, the spec is invalid
+ asJSON, _ := refable.MarshalJSON()
+ var asSchema spec.Schema
+
+ err := asSchema.UnmarshalJSON(asJSON)
+ if err != nil {
+ return nil,
+ fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema, got: %T (%v)",
+ currentRef.String(), value, err,
+ )
+ }
+ warnings = append(warnings, fmt.Sprintf("found $ref %q (response) interpreted as schema", currentRef.String()))
+
+ if asSchema.Ref.String() == "" {
+ break DOWNREF
+ }
+ currentRef = asSchema.Ref
+
+ case spec.Parameter:
+ // a pointer points to a schema initially marshalled in parameters section...
+ // Attempt to convert this to a schema. If this fails, the spec is invalid
+ asJSON, _ := refable.MarshalJSON()
+ var asSchema spec.Schema
+ if err := asSchema.UnmarshalJSON(asJSON); err != nil {
+ return nil,
+ fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema, got: %T (%v)",
+ currentRef.String(), value, err,
+ )
+ }
+
+ warnings = append(warnings, fmt.Sprintf("found $ref %q (parameter) interpreted as schema", currentRef.String()))
+
+ if asSchema.Ref.String() == "" {
+ break DOWNREF
+ }
+ currentRef = asSchema.Ref
+
+ default:
+ // fallback: attempts to resolve the pointer as a schema
+ if refable == nil {
+ break DOWNREF
+ }
+
+ asJSON, _ := json.Marshal(refable)
+ var asSchema spec.Schema
+ if err := asSchema.UnmarshalJSON(asJSON); err != nil {
+ return nil,
+ fmt.Errorf("unhandled type to resolve JSON pointer %s. Expected a Schema, got: %T (%v)",
+ currentRef.String(), value, err,
+ )
+ }
+ warnings = append(warnings, fmt.Sprintf("found $ref %q (%T) interpreted as schema", currentRef.String(), refable))
+
+ if asSchema.Ref.String() == "" {
+ break DOWNREF
+ }
+ currentRef = asSchema.Ref
+ }
+ }
+
+ // assess what schema we're ending with
+ sch, erv := spec.ResolveRefWithBase(sp, &currentRef, opts)
+ if erv != nil {
+ return nil, erv
+ }
+
+ if sch == nil {
+ return nil, fmt.Errorf("no schema found at %s", currentRef.String())
+ }
+
+ return &DeepestRefResult{Ref: currentRef, Schema: sch, Warnings: warnings}, nil
+}
diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go b/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go
new file mode 100644
index 0000000000..4590236e68
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go
@@ -0,0 +1,29 @@
+// Package schutils provides tools to save or clone a schema
+// when flattening a spec.
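`DeepestRef` walks cascades of `$ref` until it reaches a top-level definition or a schema with no further indirection. A simplified, definitions-only sketch of the same walk (hypothetical helper, omitting the parameter/response handling the real code performs):

```go
package sketch

import (
	"fmt"
	"path"

	"github.com/go-openapi/spec"
)

// deepestDefinitionRef follows $ref chains through #/definitions until no
// further indirection remains, guarding against cycles as DeepestRef does.
func deepestDefinitionRef(defs spec.Definitions, ref spec.Ref) (spec.Ref, error) {
	visited := make(map[string]bool)
	for {
		key := ref.String()
		if visited[key] {
			return spec.Ref{}, fmt.Errorf("cannot resolve cyclic chain of pointers under %s", key)
		}
		visited[key] = true

		target, ok := defs[path.Base(key)]
		if !ok || target.Ref.String() == "" {
			return ref, nil // deepest ref reached
		}
		ref = target.Ref
	}
}
```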
+package schutils
+
+import (
+ "github.com/go-openapi/spec"
+ "github.com/go-openapi/swag"
+)
+
+// Save registers a schema as an entry in spec #/definitions
+func Save(sp *spec.Swagger, name string, schema *spec.Schema) {
+ if schema == nil {
+ return
+ }
+
+ if sp.Definitions == nil {
+ sp.Definitions = make(map[string]spec.Schema, 150)
+ }
+
+ sp.Definitions[name] = *schema
+}
+
+// Clone deep-clones a schema
+func Clone(schema *spec.Schema) *spec.Schema {
+ var sch spec.Schema
+ _ = swag.FromDynamicJSON(schema, &sch)
+
+ return &sch
+}
diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go
new file mode 100644
index 0000000000..ac80fc2e83
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go
@@ -0,0 +1,201 @@
+package sortref
+
+import (
+ "net/http"
+ "path"
+ "strconv"
+ "strings"
+
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/spec"
+)
+
+const (
+ paths = "paths"
+ responses = "responses"
+ parameters = "parameters"
+ definitions = "definitions"
+)
+
+var (
+ ignoredKeys map[string]struct{}
+ validMethods map[string]struct{}
+)
+
+func init() {
+ ignoredKeys = map[string]struct{}{
+ "schema": {},
+ "properties": {},
+ "not": {},
+ "anyOf": {},
+ "oneOf": {},
+ }
+
+ validMethods = map[string]struct{}{
+ "GET": {},
+ "HEAD": {},
+ "OPTIONS": {},
+ "PATCH": {},
+ "POST": {},
+ "PUT": {},
+ "DELETE": {},
+ }
+}
+
+// Key represents a key item constructed from /-separated segments
+type Key struct {
+ Segments int
+ Key string
+}
+
+// Keys is a sortable collection of Keys
+type Keys []Key
+
+func (k Keys) Len() int { return len(k) }
+func (k Keys) Swap(i, j int) { k[i], k[j] = k[j], k[i] }
+func (k Keys) Less(i, j int) bool {
+ return k[i].Segments > k[j].Segments || (k[i].Segments == k[j].Segments && k[i].Key < k[j].Key)
+}
+
+// KeyParts constructs a SplitKey with all its /-separated segments decomposed. It is sortable.
+func KeyParts(key string) SplitKey {
+ var res []string
+ for _, part := range strings.Split(key[1:], "/") {
+ if part != "" {
+ res = append(res, jsonpointer.Unescape(part))
+ }
+ }
+
+ return res
+}
+
+// SplitKey holds the parts of a /-separated key, so that their location may be determined.
+type SplitKey []string
+
+// IsDefinition is true when the split key is in the #/definitions section of a spec
+func (s SplitKey) IsDefinition() bool {
+ return len(s) > 1 && s[0] == definitions
+}
+
+// DefinitionName yields the name of the definition
+func (s SplitKey) DefinitionName() string {
+ if !s.IsDefinition() {
+ return ""
+ }
+
+ return s[1]
+}
+
+func (s SplitKey) isKeyName(i int) bool {
+ if i <= 0 {
+ return false
+ }
+
+ count := 0
+ for idx := i - 1; idx > 0; idx-- {
+ if s[idx] != "properties" {
+ break
+ }
+ count++
+ }
+
+ return count%2 != 0
+}
+
+// PartAdder knows how to construct the components of a new name
+type PartAdder func(string) []string
+
+// BuildName builds a name from segments
+func (s SplitKey) BuildName(segments []string, startIndex int, adder PartAdder) string {
+ for i, part := range s[startIndex:] {
+ if _, ignored := ignoredKeys[part]; !ignored || s.isKeyName(startIndex+i) {
+ segments = append(segments, adder(part)...)
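`Clone` above deep-copies by round-tripping through dynamic JSON. A small standalone demonstration of the semantics (re-stating the helper locally, since the schutils package is internal):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/spec"
	"github.com/go-openapi/swag"
)

// clone mirrors schutils.Clone: a deep copy via a dynamic JSON round-trip.
func clone(schema *spec.Schema) *spec.Schema {
	var sch spec.Schema
	_ = swag.FromDynamicJSON(schema, &sch)

	return &sch
}

func main() {
	orig := spec.StringProperty()
	cp := clone(orig)
	cp.Title = "changed"

	fmt.Println(orig.Title) // still empty: the copy shares no state
}
```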
+ } + } + + return strings.Join(segments, " ") +} + +// IsOperation is true when the split key is in the operations section +func (s SplitKey) IsOperation() bool { + return len(s) > 1 && s[0] == paths +} + +// IsSharedOperationParam is true when the split key is in the parameters section of a path +func (s SplitKey) IsSharedOperationParam() bool { + return len(s) > 2 && s[0] == paths && s[2] == parameters +} + +// IsSharedParam is true when the split key is in the #/parameters section of a spec +func (s SplitKey) IsSharedParam() bool { + return len(s) > 1 && s[0] == parameters +} + +// IsOperationParam is true when the split key is in the parameters section of an operation +func (s SplitKey) IsOperationParam() bool { + return len(s) > 3 && s[0] == paths && s[3] == parameters +} + +// IsOperationResponse is true when the split key is in the responses section of an operation +func (s SplitKey) IsOperationResponse() bool { + return len(s) > 3 && s[0] == paths && s[3] == responses +} + +// IsSharedResponse is true when the split key is in the #/responses section of a spec +func (s SplitKey) IsSharedResponse() bool { + return len(s) > 1 && s[0] == responses +} + +// IsDefaultResponse is true when the split key is the default response for an operation +func (s SplitKey) IsDefaultResponse() bool { + return len(s) > 4 && s[0] == paths && s[3] == responses && s[4] == "default" +} + +// IsStatusCodeResponse is true when the split key is an operation response with a status code +func (s SplitKey) IsStatusCodeResponse() bool { + isInt := func() bool { + _, err := strconv.Atoi(s[4]) + + return err == nil + } + + return len(s) > 4 && s[0] == paths && s[3] == responses && isInt() +} + +// ResponseName yields either the status code or "Default" for a response +func (s SplitKey) ResponseName() string { + if s.IsStatusCodeResponse() { + code, _ := strconv.Atoi(s[4]) + + return http.StatusText(code) + } + + if s.IsDefaultResponse() { + return "Default" + } + + return "" +} + +// PathItemRef constructs a $ref object from a split key of the form /{path}/{method} +func (s SplitKey) PathItemRef() spec.Ref { + if len(s) < 3 { + return spec.Ref{} + } + + pth, method := s[1], s[2] + if _, isValidMethod := validMethods[strings.ToUpper(method)]; !isValidMethod && !strings.HasPrefix(method, "x-") { + return spec.Ref{} + } + + return spec.MustCreateRef("#" + path.Join("/", paths, jsonpointer.Escape(pth), strings.ToUpper(method))) +} + +// PathRef constructs a $ref object from a split key of the form /paths/{reference} +func (s SplitKey) PathRef() spec.Ref { + if !s.IsOperation() { + return spec.Ref{} + } + + return spec.MustCreateRef("#" + path.Join("/", paths, jsonpointer.Escape(s[1]))) +} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go new file mode 100644 index 0000000000..73243df87f --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go @@ -0,0 +1,141 @@ +package sortref + +import ( + "reflect" + "sort" + "strings" + + "github.com/go-openapi/analysis/internal/flatten/normalize" + "github.com/go-openapi/spec" +) + +var depthGroupOrder = []string{ + "sharedParam", "sharedResponse", "sharedOpParam", "opParam", "codeResponse", "defaultResponse", "definition", +} + +type mapIterator struct { + len int + mapIter *reflect.MapIter +} + +func (i *mapIterator) Next() bool { + return i.mapIter.Next() +} + +func (i *mapIterator) Len() int { + return i.len +} + +func (i 
*mapIterator) Key() string { + return i.mapIter.Key().String() +} + +func mustMapIterator(anyMap interface{}) *mapIterator { + val := reflect.ValueOf(anyMap) + + return &mapIterator{mapIter: val.MapRange(), len: val.Len()} +} + +// DepthFirst sorts a map of anything. It groups keys by category +// (shared params, op param, statuscode response, default response, definitions) +// sort groups internally by number of parts in the key and lexical names +// flatten groups into a single list of keys +func DepthFirst(in interface{}) []string { + iterator := mustMapIterator(in) + sorted := make([]string, 0, iterator.Len()) + grouped := make(map[string]Keys, iterator.Len()) + + for iterator.Next() { + k := iterator.Key() + split := KeyParts(k) + var pk string + + if split.IsSharedOperationParam() { + pk = "sharedOpParam" + } + if split.IsOperationParam() { + pk = "opParam" + } + if split.IsStatusCodeResponse() { + pk = "codeResponse" + } + if split.IsDefaultResponse() { + pk = "defaultResponse" + } + if split.IsDefinition() { + pk = "definition" + } + if split.IsSharedParam() { + pk = "sharedParam" + } + if split.IsSharedResponse() { + pk = "sharedResponse" + } + grouped[pk] = append(grouped[pk], Key{Segments: len(split), Key: k}) + } + + for _, pk := range depthGroupOrder { + res := grouped[pk] + sort.Sort(res) + + for _, v := range res { + sorted = append(sorted, v.Key) + } + } + + return sorted +} + +// topMostRefs is able to sort refs by hierarchical then lexicographic order, +// yielding refs ordered breadth-first. +type topmostRefs []string + +func (k topmostRefs) Len() int { return len(k) } +func (k topmostRefs) Swap(i, j int) { k[i], k[j] = k[j], k[i] } +func (k topmostRefs) Less(i, j int) bool { + li, lj := len(strings.Split(k[i], "/")), len(strings.Split(k[j], "/")) + if li == lj { + return k[i] < k[j] + } + + return li < lj +} + +// TopmostFirst sorts references by depth +func TopmostFirst(refs []string) []string { + res := topmostRefs(refs) + sort.Sort(res) + + return res +} + +// RefRevIdx is a reverse index for references +type RefRevIdx struct { + Ref spec.Ref + Keys []string +} + +// ReverseIndex builds a reverse index for references in schemas +func ReverseIndex(schemas map[string]spec.Ref, basePath string) map[string]RefRevIdx { + collected := make(map[string]RefRevIdx) + for key, schRef := range schemas { + // normalize paths before sorting, + // so we get together keys that are from the same external file + normalizedPath := normalize.Path(schRef, basePath) + + entry, ok := collected[normalizedPath] + if ok { + entry.Keys = append(entry.Keys, key) + collected[normalizedPath] = entry + + continue + } + + collected[normalizedPath] = RefRevIdx{ + Ref: schRef, + Keys: []string{key}, + } + } + + return collected +} diff --git a/vendor/github.com/go-openapi/analysis/mixin.go b/vendor/github.com/go-openapi/analysis/mixin.go new file mode 100644 index 0000000000..7785a29b27 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/mixin.go @@ -0,0 +1,515 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
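The breadth-first ordering produced by `TopmostFirst` can be illustrated standalone (the comparator below restates `topmostRefs.Less`, since the sortref package is internal):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	refs := []string{
		"#/definitions/user/properties/address",
		"#/definitions/user",
		"#/definitions/account",
	}

	// fewer /-separated segments first; lexicographic within the same depth
	sort.Slice(refs, func(i, j int) bool {
		li, lj := len(strings.Split(refs[i], "/")), len(strings.Split(refs[j], "/"))
		if li == lj {
			return refs[i] < refs[j]
		}

		return li < lj
	})

	fmt.Println(refs)
	// [#/definitions/account #/definitions/user #/definitions/user/properties/address]
}
```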
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package analysis
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/go-openapi/spec"
+)
+
+// Mixin modifies the primary swagger spec by adding the paths and
+// definitions from the mixin specs. Top level parameters and
+// responses from the mixins are also carried over. Operation id
+// collisions are avoided by appending "Mixin" but only if
+// needed.
+//
+// The following parts of primary are subject to merge, filling empty details
+// - Info
+// - BasePath
+// - Host
+// - ExternalDocs
+//
+// Consider calling FixEmptyResponseDescriptions() on the modified primary
+// if you read them from storage and they are valid to start with.
+//
+// Entries in "paths", "definitions", "parameters" and "responses" are
+// added to the primary in the order of the given mixins. If the entry
+// already exists in primary it is skipped with a warning message.
+//
+// The count of skipped entries (from collisions) is returned so any
+// deviation from the number expected can flag a warning in your build
+// scripts. Carefully review the collisions before accepting them;
+// consider renaming things if possible.
+//
+// No key normalization takes place (paths, type defs,
+// etc). Ensure they are canonical if your downstream tools do
+// key normalization of any form.
+//
+// Merging schemes (http, https), and consumers/producers do not account for
+// collisions.
+func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string {
+ skipped := make([]string, 0, len(mixins))
+ opIDs := getOpIDs(primary)
+ initPrimary(primary)
+
+ for i, m := range mixins {
+ skipped = append(skipped, mergeSwaggerProps(primary, m)...)
+
+ skipped = append(skipped, mergeConsumes(primary, m)...)
+
+ skipped = append(skipped, mergeProduces(primary, m)...)
+
+ skipped = append(skipped, mergeTags(primary, m)...)
+
+ skipped = append(skipped, mergeSchemes(primary, m)...)
+
+ skipped = append(skipped, mergeSecurityDefinitions(primary, m)...)
+
+ skipped = append(skipped, mergeSecurityRequirements(primary, m)...)
+
+ skipped = append(skipped, mergeDefinitions(primary, m)...)
+
+ // merging paths requires a map of operationIDs to work with
+ skipped = append(skipped, mergePaths(primary, m, opIDs, i)...)
+
+ skipped = append(skipped, mergeParameters(primary, m)...)
+
+ skipped = append(skipped, mergeResponses(primary, m)...)
+ }
+
+ return skipped
+}
+
+// getOpIDs extracts all the paths.<path>.operationIds from the given
+// spec and returns them as the keys in a map with 'true' values.
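Mixin is part of the package's public surface; a sketch of a typical merge, assuming specs loaded with go-openapi/loads (file names hypothetical):

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/loads"
)

func main() {
	primary, err := loads.Spec("primary.yaml")
	if err != nil {
		log.Fatal(err)
	}
	mixin, err := loads.Spec("mixin.yaml")
	if err != nil {
		log.Fatal(err)
	}

	// paths, definitions, parameters and responses from the mixin are merged
	// into the primary; colliding entries are skipped and reported
	for _, warn := range analysis.Mixin(primary.Spec(), mixin.Spec()) {
		fmt.Print(warn)
	}
}
```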
+func getOpIDs(s *spec.Swagger) map[string]bool {
+ rv := make(map[string]bool)
+ if s.Paths == nil {
+ return rv
+ }
+
+ for _, v := range s.Paths.Paths {
+ piops := pathItemOps(v)
+
+ for _, op := range piops {
+ rv[op.ID] = true
+ }
+ }
+
+ return rv
+}
+
+func pathItemOps(p spec.PathItem) []*spec.Operation {
+ var rv []*spec.Operation
+ rv = appendOp(rv, p.Get)
+ rv = appendOp(rv, p.Put)
+ rv = appendOp(rv, p.Post)
+ rv = appendOp(rv, p.Delete)
+ rv = appendOp(rv, p.Head)
+ rv = appendOp(rv, p.Patch)
+
+ return rv
+}
+
+func appendOp(ops []*spec.Operation, op *spec.Operation) []*spec.Operation {
+ if op == nil {
+ return ops
+ }
+
+ return append(ops, op)
+}
+
+func mergeSecurityDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
+ for k, v := range m.SecurityDefinitions {
+ if _, exists := primary.SecurityDefinitions[k]; exists {
+ warn := fmt.Sprintf(
+ "SecurityDefinitions entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
+ skipped = append(skipped, warn)
+
+ continue
+ }
+
+ primary.SecurityDefinitions[k] = v
+ }
+
+ return
+}
+
+func mergeSecurityRequirements(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
+ for _, v := range m.Security {
+ found := false
+ for _, vv := range primary.Security {
+ if reflect.DeepEqual(v, vv) {
+ found = true
+
+ break
+ }
+ }
+
+ if found {
+ warn := fmt.Sprintf(
+ "Security requirement: '%v' already exists in primary or higher priority mixin, skipping\n", v)
+ skipped = append(skipped, warn)
+
+ continue
+ }
+ primary.Security = append(primary.Security, v)
+ }
+
+ return
+}
+
+func mergeDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
+ for k, v := range m.Definitions {
+ // assume name collisions represent IDENTICAL type. careful.
+ if _, exists := primary.Definitions[k]; exists {
+ warn := fmt.Sprintf(
+ "definitions entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
+ skipped = append(skipped, warn)
+
+ continue
+ }
+ primary.Definitions[k] = v
+ }
+
+ return
+}
+
+func mergePaths(primary *spec.Swagger, m *spec.Swagger, opIDs map[string]bool, mixIndex int) (skipped []string) {
+ if m.Paths != nil {
+ for k, v := range m.Paths.Paths {
+ if _, exists := primary.Paths.Paths[k]; exists {
+ warn := fmt.Sprintf(
+ "paths entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
+ skipped = append(skipped, warn)
+
+ continue
+ }
+
+ // Swagger requires that operationIds be
+ // unique within a spec. If we find a
+ // collision we append "Mixin0" to the
+ // operationId we are adding, where 0 is mixin
+ // index. We assume that operationIds within
+ // all the provided specs are already unique.
+ piops := pathItemOps(v)
+ for _, piop := range piops {
+ if opIDs[piop.ID] {
+ piop.ID = fmt.Sprintf("%v%v%v", piop.ID, "Mixin", mixIndex)
+ }
+ opIDs[piop.ID] = true
+ }
+ primary.Paths.Paths[k] = v
+ }
+ }
+
+ return
+}
+
+func mergeParameters(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
+ for k, v := range m.Parameters {
+ // could try to rename on conflict but would
+ // have to fix $refs in the mixin.
Complain
+ // for now
+ if _, exists := primary.Parameters[k]; exists {
+ warn := fmt.Sprintf(
+ "top level parameters entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
+ skipped = append(skipped, warn)
+
+ continue
+ }
+ primary.Parameters[k] = v
+ }
+
+ return
+}
+
+func mergeResponses(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
+ for k, v := range m.Responses {
+ // could try to rename on conflict but would
+ // have to fix $refs in the mixin. Complain
+ // for now
+ if _, exists := primary.Responses[k]; exists {
+ warn := fmt.Sprintf(
+ "top level responses entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
+ skipped = append(skipped, warn)
+
+ continue
+ }
+ primary.Responses[k] = v
+ }
+
+ return skipped
+}
+
+func mergeConsumes(primary *spec.Swagger, m *spec.Swagger) []string {
+ for _, v := range m.Consumes {
+ found := false
+ for _, vv := range primary.Consumes {
+ if v == vv {
+ found = true
+
+ break
+ }
+ }
+
+ if found {
+ // no warning here: we just skip it
+ continue
+ }
+ primary.Consumes = append(primary.Consumes, v)
+ }
+
+ return []string{}
+}
+
+func mergeProduces(primary *spec.Swagger, m *spec.Swagger) []string {
+ for _, v := range m.Produces {
+ found := false
+ for _, vv := range primary.Produces {
+ if v == vv {
+ found = true
+
+ break
+ }
+ }
+
+ if found {
+ // no warning here: we just skip it
+ continue
+ }
+ primary.Produces = append(primary.Produces, v)
+ }
+
+ return []string{}
+}
+
+func mergeTags(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
+ for _, v := range m.Tags {
+ found := false
+ for _, vv := range primary.Tags {
+ if v.Name == vv.Name {
+ found = true
+
+ break
+ }
+ }
+
+ if found {
+ warn := fmt.Sprintf(
+ "top level tags entry with name '%v' already exists in primary or higher priority mixin, skipping\n",
+ v.Name,
+ )
+ skipped = append(skipped, warn)
+
+ continue
+ }
+
+ primary.Tags = append(primary.Tags, v)
+ }
+
+ return
+}
+
+func mergeSchemes(primary *spec.Swagger, m *spec.Swagger) []string {
+ for _, v := range m.Schemes {
+ found := false
+ for _, vv := range primary.Schemes {
+ if v == vv {
+ found = true
+
+ break
+ }
+ }
+
+ if found {
+ // no warning here: we just skip it
+ continue
+ }
+ primary.Schemes = append(primary.Schemes, v)
+ }
+
+ return []string{}
+}
+
+func mergeSwaggerProps(primary *spec.Swagger, m *spec.Swagger) []string {
+ var skipped, skippedInfo, skippedDocs []string
+
+ primary.Extensions, skipped = mergeExtensions(primary.Extensions, m.Extensions)
+
+ // merging details in swagger top properties
+ if primary.Host == "" {
+ primary.Host = m.Host
+ }
+
+ if primary.BasePath == "" {
+ primary.BasePath = m.BasePath
+ }
+
+ if primary.Info == nil {
+ primary.Info = m.Info
+ } else if m.Info != nil {
+ skippedInfo = mergeInfo(primary.Info, m.Info)
+ skipped = append(skipped, skippedInfo...)
+ }
+
+ if primary.ExternalDocs == nil {
+ primary.ExternalDocs = m.ExternalDocs
+ } else if m.ExternalDocs != nil {
+ skippedDocs = mergeExternalDocs(primary.ExternalDocs, m.ExternalDocs)
+ skipped = append(skipped, skippedDocs...)
+ }
+
+ return skipped
+}
+
+//nolint:unparam
+func mergeExternalDocs(primary *spec.ExternalDocumentation, m *spec.ExternalDocumentation) []string {
+ if primary.Description == "" {
+ primary.Description = m.Description
+ }
+
+ if primary.URL == "" {
+ primary.URL = m.URL
+ }
+
+ return nil
+}
+
+func mergeInfo(primary *spec.Info, m *spec.Info) []string {
+ var sk, skipped []string
+
+ primary.Extensions, sk = mergeExtensions(primary.Extensions, m.Extensions)
+ skipped = append(skipped, sk...)
+
+ if primary.Description == "" {
+ primary.Description = m.Description
+ }
+
+ if primary.Title == "" {
+ primary.Title = m.Title
+ }
+
+ if primary.TermsOfService == "" {
+ primary.TermsOfService = m.TermsOfService
+ }
+
+ if primary.Version == "" {
+ primary.Version = m.Version
+ }
+
+ if primary.Contact == nil {
+ primary.Contact = m.Contact
+ } else if m.Contact != nil {
+ var csk []string
+ primary.Contact.Extensions, csk = mergeExtensions(primary.Contact.Extensions, m.Contact.Extensions)
+ skipped = append(skipped, csk...)
+
+ if primary.Contact.Name == "" {
+ primary.Contact.Name = m.Contact.Name
+ }
+
+ if primary.Contact.URL == "" {
+ primary.Contact.URL = m.Contact.URL
+ }
+
+ if primary.Contact.Email == "" {
+ primary.Contact.Email = m.Contact.Email
+ }
+ }
+
+ if primary.License == nil {
+ primary.License = m.License
+ } else if m.License != nil {
+ var lsk []string
+ primary.License.Extensions, lsk = mergeExtensions(primary.License.Extensions, m.License.Extensions)
+ skipped = append(skipped, lsk...)
+
+ if primary.License.Name == "" {
+ primary.License.Name = m.License.Name
+ }
+
+ if primary.License.URL == "" {
+ primary.License.URL = m.License.URL
+ }
+ }
+
+ return skipped
+}
+
+func mergeExtensions(primary spec.Extensions, m spec.Extensions) (result spec.Extensions, skipped []string) {
+ if primary == nil {
+ result = m
+
+ return
+ }
+
+ if m == nil {
+ result = primary
+
+ return
+ }
+
+ result = primary
+ for k, v := range m {
+ if _, found := primary[k]; found {
+ skipped = append(skipped, k)
+
+ continue
+ }
+
+ primary[k] = v
+ }
+
+ return
+}
+
+func initPrimary(primary *spec.Swagger) {
+ if primary.SecurityDefinitions == nil {
+ primary.SecurityDefinitions = make(map[string]*spec.SecurityScheme)
+ }
+
+ if primary.Security == nil {
+ primary.Security = make([]map[string][]string, 0, 10)
+ }
+
+ if primary.Produces == nil {
+ primary.Produces = make([]string, 0, 10)
+ }
+
+ if primary.Consumes == nil {
+ primary.Consumes = make([]string, 0, 10)
+ }
+
+ if primary.Tags == nil {
+ primary.Tags = make([]spec.Tag, 0, 10)
+ }
+
+ if primary.Schemes == nil {
+ primary.Schemes = make([]string, 0, 10)
+ }
+
+ if primary.Paths == nil {
+ primary.Paths = &spec.Paths{Paths: make(map[string]spec.PathItem)}
+ }
+
+ if primary.Paths.Paths == nil {
+ primary.Paths.Paths = make(map[string]spec.PathItem)
+ }
+
+ if primary.Definitions == nil {
+ primary.Definitions = make(spec.Definitions)
+ }
+
+ if primary.Parameters == nil {
+ primary.Parameters = make(map[string]spec.Parameter)
+ }
+
+ if primary.Responses == nil {
+ primary.Responses = make(map[string]spec.Response)
+ }
+}
diff --git a/vendor/github.com/go-openapi/analysis/schema.go b/vendor/github.com/go-openapi/analysis/schema.go
new file mode 100644
index 0000000000..ab190db5b7
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/schema.go
@@ -0,0 +1,256 @@
+package analysis
+
+import (
+ "errors"
+
+ "github.com/go-openapi/spec"
+ "github.com/go-openapi/strfmt"
+)
+
+// SchemaOpts configures the schema analyzer
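All of the merge helpers above share the same collision policy: the primary spec always wins, and the colliding key is only reported. Restated as a hypothetical generic helper (`mergeKeepingPrimary` is not part of the package):

```go
// mergeKeepingPrimary mirrors the policy of mergeExtensions and friends:
// existing keys in primary are kept, and colliding mixin keys are reported.
func mergeKeepingPrimary(primary, mixin map[string]interface{}) (map[string]interface{}, []string) {
	var skipped []string
	for k, v := range mixin {
		if _, found := primary[k]; found {
			skipped = append(skipped, k)
			continue
		}
		primary[k] = v
	}

	return primary, skipped
}
```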
+type SchemaOpts struct { + Schema *spec.Schema + Root interface{} + BasePath string + _ struct{} +} + +// Schema analysis, will classify the schema according to known +// patterns. +func Schema(opts SchemaOpts) (*AnalyzedSchema, error) { + if opts.Schema == nil { + return nil, errors.New("no schema to analyze") + } + + a := &AnalyzedSchema{ + schema: opts.Schema, + root: opts.Root, + basePath: opts.BasePath, + } + + a.initializeFlags() + a.inferKnownType() + a.inferEnum() + a.inferBaseType() + + if err := a.inferMap(); err != nil { + return nil, err + } + if err := a.inferArray(); err != nil { + return nil, err + } + + a.inferTuple() + + if err := a.inferFromRef(); err != nil { + return nil, err + } + + a.inferSimpleSchema() + + return a, nil +} + +// AnalyzedSchema indicates what the schema represents +type AnalyzedSchema struct { + schema *spec.Schema + root interface{} + basePath string + + hasProps bool + hasAllOf bool + hasItems bool + hasAdditionalProps bool + hasAdditionalItems bool + hasRef bool + + IsKnownType bool + IsSimpleSchema bool + IsArray bool + IsSimpleArray bool + IsMap bool + IsSimpleMap bool + IsExtendedObject bool + IsTuple bool + IsTupleWithExtra bool + IsBaseType bool + IsEnum bool +} + +// Inherits copies value fields from other onto this schema +func (a *AnalyzedSchema) inherits(other *AnalyzedSchema) { + if other == nil { + return + } + a.hasProps = other.hasProps + a.hasAllOf = other.hasAllOf + a.hasItems = other.hasItems + a.hasAdditionalItems = other.hasAdditionalItems + a.hasAdditionalProps = other.hasAdditionalProps + a.hasRef = other.hasRef + + a.IsKnownType = other.IsKnownType + a.IsSimpleSchema = other.IsSimpleSchema + a.IsArray = other.IsArray + a.IsSimpleArray = other.IsSimpleArray + a.IsMap = other.IsMap + a.IsSimpleMap = other.IsSimpleMap + a.IsExtendedObject = other.IsExtendedObject + a.IsTuple = other.IsTuple + a.IsTupleWithExtra = other.IsTupleWithExtra + a.IsBaseType = other.IsBaseType + a.IsEnum = other.IsEnum +} + +func (a *AnalyzedSchema) inferFromRef() error { + if a.hasRef { + sch := new(spec.Schema) + sch.Ref = a.schema.Ref + err := spec.ExpandSchema(sch, a.root, nil) + if err != nil { + return err + } + rsch, err := Schema(SchemaOpts{ + Schema: sch, + Root: a.root, + BasePath: a.basePath, + }) + if err != nil { + // NOTE(fredbi): currently the only cause for errors is + // unresolved ref. Since spec.ExpandSchema() expands the + // schema recursively, there is no chance to get there, + // until we add more causes for error in this schema analysis. 
+ return err + } + a.inherits(rsch) + } + + return nil +} + +func (a *AnalyzedSchema) inferSimpleSchema() { + a.IsSimpleSchema = a.IsKnownType || a.IsSimpleArray || a.IsSimpleMap +} + +func (a *AnalyzedSchema) inferKnownType() { + tpe := a.schema.Type + format := a.schema.Format + a.IsKnownType = tpe.Contains("boolean") || + tpe.Contains("integer") || + tpe.Contains("number") || + tpe.Contains("string") || + (format != "" && strfmt.Default.ContainsName(format)) || + (a.isObjectType() && !a.hasProps && !a.hasAllOf && !a.hasAdditionalProps && !a.hasAdditionalItems) +} + +func (a *AnalyzedSchema) inferMap() error { + if !a.isObjectType() { + return nil + } + + hasExtra := a.hasProps || a.hasAllOf + a.IsMap = a.hasAdditionalProps && !hasExtra + a.IsExtendedObject = a.hasAdditionalProps && hasExtra + + if !a.IsMap { + return nil + } + + // maps + if a.schema.AdditionalProperties.Schema != nil { + msch, err := Schema(SchemaOpts{ + Schema: a.schema.AdditionalProperties.Schema, + Root: a.root, + BasePath: a.basePath, + }) + if err != nil { + return err + } + a.IsSimpleMap = msch.IsSimpleSchema + } else if a.schema.AdditionalProperties.Allows { + a.IsSimpleMap = true + } + + return nil +} + +func (a *AnalyzedSchema) inferArray() error { + // an array has Items defined as an object schema, otherwise we qualify this JSON array as a tuple + // (yes, even if the Items array contains only one element). + // arrays in JSON schema may be unrestricted (i.e no Items specified). + // Note that arrays in Swagger MUST have Items. Nonetheless, we analyze unrestricted arrays. + // + // NOTE: the spec package misses the distinction between: + // items: [] and items: {}, so we consider both arrays here. + a.IsArray = a.isArrayType() && (a.schema.Items == nil || a.schema.Items.Schemas == nil) + if a.IsArray && a.hasItems { + if a.schema.Items.Schema != nil { + itsch, err := Schema(SchemaOpts{ + Schema: a.schema.Items.Schema, + Root: a.root, + BasePath: a.basePath, + }) + if err != nil { + return err + } + + a.IsSimpleArray = itsch.IsSimpleSchema + } + } + + if a.IsArray && !a.hasItems { + a.IsSimpleArray = true + } + + return nil +} + +func (a *AnalyzedSchema) inferTuple() { + tuple := a.hasItems && a.schema.Items.Schemas != nil + a.IsTuple = tuple && !a.hasAdditionalItems + a.IsTupleWithExtra = tuple && a.hasAdditionalItems +} + +func (a *AnalyzedSchema) inferBaseType() { + if a.isObjectType() { + a.IsBaseType = a.schema.Discriminator != "" + } +} + +func (a *AnalyzedSchema) inferEnum() { + a.IsEnum = len(a.schema.Enum) > 0 +} + +func (a *AnalyzedSchema) initializeFlags() { + a.hasProps = len(a.schema.Properties) > 0 + a.hasAllOf = len(a.schema.AllOf) > 0 + a.hasRef = a.schema.Ref.String() != "" + + a.hasItems = a.schema.Items != nil && + (a.schema.Items.Schema != nil || len(a.schema.Items.Schemas) > 0) + + a.hasAdditionalProps = a.schema.AdditionalProperties != nil && + (a.schema.AdditionalProperties.Schema != nil || a.schema.AdditionalProperties.Allows) + + a.hasAdditionalItems = a.schema.AdditionalItems != nil && + (a.schema.AdditionalItems.Schema != nil || a.schema.AdditionalItems.Allows) +} + +func (a *AnalyzedSchema) isObjectType() bool { + return !a.hasRef && (a.schema.Type == nil || a.schema.Type.Contains("") || a.schema.Type.Contains("object")) +} + +func (a *AnalyzedSchema) isArrayType() bool { + return !a.hasRef && (a.schema.Type != nil && a.schema.Type.Contains("array")) +} + +// isAnalyzedAsComplex determines if an analyzed schema is eligible to flattening (i.e. it is "complex"). 
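The flags computed by this analyzer drive the flattening decisions seen earlier. A usage sketch with the public entry point (the expected booleans are reasoned from the inference rules above):

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/spec"
)

func main() {
	// an object with additionalProperties: {type: string} and no properties
	sch := spec.MapProperty(spec.StringProperty())

	an, err := analysis.Schema(analysis.SchemaOpts{Schema: sch})
	if err != nil {
		log.Fatal(err)
	}

	// a map of a simple type is itself simple, hence not "complex"
	fmt.Println(an.IsMap, an.IsSimpleMap, an.IsSimpleSchema) // true true true
}
```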
+//
+// Complex means the schema is not any of:
+// - a simple type (primitive)
+// - an array of something (items are possibly complex ; if this is the case, items will generate a definition)
+// - a map of something (additionalProperties are possibly complex ; if this is the case, additionalProperties will
+//   generate a definition)
+func (a *AnalyzedSchema) isAnalyzedAsComplex() bool {
+	return !a.IsSimpleSchema && !a.IsArray && !a.IsMap
+}
diff --git a/vendor/github.com/go-openapi/errors/.gitattributes b/vendor/github.com/go-openapi/errors/.gitattributes
new file mode 100644
index 0000000000..a0717e4b3b
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/.gitattributes
@@ -0,0 +1 @@
+*.go text eol=lf
\ No newline at end of file
diff --git a/vendor/github.com/go-openapi/errors/.gitignore b/vendor/github.com/go-openapi/errors/.gitignore
new file mode 100644
index 0000000000..dd91ed6a04
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/.gitignore
@@ -0,0 +1,2 @@
+secrets.yml
+coverage.out
diff --git a/vendor/github.com/go-openapi/errors/.golangci.yml b/vendor/github.com/go-openapi/errors/.golangci.yml
new file mode 100644
index 0000000000..ee8b9bd1f1
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/.golangci.yml
@@ -0,0 +1,55 @@
+linters-settings:
+  gocyclo:
+    min-complexity: 45
+  dupl:
+    threshold: 200
+  goconst:
+    min-len: 2
+    min-occurrences: 3
+
+linters:
+  enable-all: true
+  disable:
+    - unparam
+    - lll
+    - gochecknoinits
+    - gochecknoglobals
+    - funlen
+    - godox
+    - gocognit
+    - whitespace
+    - wsl
+    - wrapcheck
+    - testpackage
+    - nlreturn
+    - errorlint
+    - nestif
+    - godot
+    - gofumpt
+    - paralleltest
+    - tparallel
+    - thelper
+    - exhaustruct
+    - varnamelen
+    - gci
+    - depguard
+    - errchkjson
+    - inamedparam
+    - nonamedreturns
+    - musttag
+    - ireturn
+    - forcetypeassert
+    - cyclop
+    # deprecated linters
+    #- deadcode
+    #- interfacer
+    #- scopelint
+    #- varcheck
+    #- structcheck
+    #- golint
+    #- nosnakecase
+    #- maligned
+    #- goerr113
+    #- ifshort
+    #- gomnd
+    #- exhaustivestruct
diff --git a/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..9322b065e3
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md
@@ -0,0 +1,74 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of experience,
+nationality, personal appearance, race, religion, or sexual identity and
+orientation.
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/errors/LICENSE b/vendor/github.com/go-openapi/errors/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
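For orientation, here is a minimal, hypothetical usage sketch (not part of the vendored files) of the errors package introduced below; it uses only the constructors this diff adds in api.go and schema.go (Required, InvalidType, CompositeValidationError, ServeError). The route, field names, and port are illustrative assumptions.

package main

import (
	"net/http"

	"github.com/go-openapi/errors"
)

func main() {
	http.HandleFunc("/widgets", func(w http.ResponseWriter, r *http.Request) {
		// Gather individual validation failures into one composite error.
		verr := errors.CompositeValidationError(
			errors.Required("name", "body", nil),                  // "name in body is required"
			errors.InvalidType("count", "body", "integer", "abc"), // type failure carrying the offending value
		)
		// ServeError flattens the composite and writes its first element as
		// JSON; validation codes >= 600 are mapped to DefaultHTTPCode (422).
		errors.ServeError(w, r, verr)
	})
	_ = http.ListenAndServe(":8080", nil)
}

ServeError also sets the Content-Type header and suppresses the body for HEAD requests, so handlers can delegate all error rendering to it.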
diff --git a/vendor/github.com/go-openapi/errors/README.md b/vendor/github.com/go-openapi/errors/README.md new file mode 100644 index 0000000000..6d57ea55c7 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/README.md @@ -0,0 +1,8 @@ +# OpenAPI errors [![Build Status](https://github.com/go-openapi/errors/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/errors/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/errors/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/errors) + +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/errors/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/errors.svg)](https://pkg.go.dev/github.com/go-openapi/errors) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/errors)](https://goreportcard.com/report/github.com/go-openapi/errors) + +Shared errors and error interface used throughout the various libraries found in the go-openapi toolkit. diff --git a/vendor/github.com/go-openapi/errors/api.go b/vendor/github.com/go-openapi/errors/api.go new file mode 100644 index 0000000000..d6f507f42d --- /dev/null +++ b/vendor/github.com/go-openapi/errors/api.go @@ -0,0 +1,192 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import ( + "encoding/json" + "fmt" + "net/http" + "reflect" + "strings" +) + +// DefaultHTTPCode is used when the error Code cannot be used as an HTTP code. 
+var DefaultHTTPCode = http.StatusUnprocessableEntity
+
+// Error represents an error interface that all swagger framework errors implement
+type Error interface {
+	error
+	Code() int32
+}
+
+type apiError struct {
+	code    int32
+	message string
+}
+
+func (a *apiError) Error() string {
+	return a.message
+}
+
+func (a *apiError) Code() int32 {
+	return a.code
+}
+
+// MarshalJSON implements the JSON encoding interface
+func (a apiError) MarshalJSON() ([]byte, error) {
+	return json.Marshal(map[string]interface{}{
+		"code":    a.code,
+		"message": a.message,
+	})
+}
+
+// New creates a new API error with a code and a message
+func New(code int32, message string, args ...interface{}) Error {
+	if len(args) > 0 {
+		return &apiError{
+			code:    code,
+			message: fmt.Sprintf(message, args...),
+		}
+	}
+	return &apiError{
+		code:    code,
+		message: message,
+	}
+}
+
+// NotFound creates a new not found error
+func NotFound(message string, args ...interface{}) Error {
+	if message == "" {
+		message = "Not found"
+	}
+	return New(http.StatusNotFound, fmt.Sprintf(message, args...))
+}
+
+// NotImplemented creates a new not implemented error
+func NotImplemented(message string) Error {
+	return New(http.StatusNotImplemented, message)
+}
+
+// MethodNotAllowedError represents an error for when the path matches but the method doesn't
+type MethodNotAllowedError struct {
+	code    int32
+	Allowed []string
+	message string
+}
+
+func (m *MethodNotAllowedError) Error() string {
+	return m.message
+}
+
+// Code the error code
+func (m *MethodNotAllowedError) Code() int32 {
+	return m.code
+}
+
+// MarshalJSON implements the JSON encoding interface
+func (m MethodNotAllowedError) MarshalJSON() ([]byte, error) {
+	return json.Marshal(map[string]interface{}{
+		"code":    m.code,
+		"message": m.message,
+		"allowed": m.Allowed,
+	})
+}
+
+func errorAsJSON(err Error) []byte {
+	//nolint:errchkjson
+	b, _ := json.Marshal(struct {
+		Code    int32  `json:"code"`
+		Message string `json:"message"`
+	}{err.Code(), err.Error()})
+	return b
+}
+
+func flattenComposite(errs *CompositeError) *CompositeError {
+	var res []error
+	for _, er := range errs.Errors {
+		switch e := er.(type) {
+		case *CompositeError:
+			if e != nil && len(e.Errors) > 0 {
+				flat := flattenComposite(e)
+				if len(flat.Errors) > 0 {
+					res = append(res, flat.Errors...)
+				}
+			}
+		default:
+			if e != nil {
+				res = append(res, e)
+			}
+		}
+	}
+	return CompositeValidationError(res...)
+} + +// MethodNotAllowed creates a new method not allowed error +func MethodNotAllowed(requested string, allow []string) Error { + msg := fmt.Sprintf("method %s is not allowed, but [%s] are", requested, strings.Join(allow, ",")) + return &MethodNotAllowedError{ + code: http.StatusMethodNotAllowed, + Allowed: allow, + message: msg, + } +} + +// ServeError implements the http error handler interface +func ServeError(rw http.ResponseWriter, r *http.Request, err error) { + rw.Header().Set("Content-Type", "application/json") + switch e := err.(type) { + case *CompositeError: + er := flattenComposite(e) + // strips composite errors to first element only + if len(er.Errors) > 0 { + ServeError(rw, r, er.Errors[0]) + } else { + // guard against empty CompositeError (invalid construct) + ServeError(rw, r, nil) + } + case *MethodNotAllowedError: + rw.Header().Add("Allow", strings.Join(e.Allowed, ",")) + rw.WriteHeader(asHTTPCode(int(e.Code()))) + if r == nil || r.Method != http.MethodHead { + _, _ = rw.Write(errorAsJSON(e)) + } + case Error: + value := reflect.ValueOf(e) + if value.Kind() == reflect.Ptr && value.IsNil() { + rw.WriteHeader(http.StatusInternalServerError) + _, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error"))) + return + } + rw.WriteHeader(asHTTPCode(int(e.Code()))) + if r == nil || r.Method != http.MethodHead { + _, _ = rw.Write(errorAsJSON(e)) + } + case nil: + rw.WriteHeader(http.StatusInternalServerError) + _, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error"))) + default: + rw.WriteHeader(http.StatusInternalServerError) + if r == nil || r.Method != http.MethodHead { + _, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, err.Error()))) + } + } +} + +func asHTTPCode(input int) int { + if input >= maximumValidHTTPCode { + return DefaultHTTPCode + } + return input +} diff --git a/vendor/github.com/go-openapi/errors/auth.go b/vendor/github.com/go-openapi/errors/auth.go new file mode 100644 index 0000000000..0545b501bd --- /dev/null +++ b/vendor/github.com/go-openapi/errors/auth.go @@ -0,0 +1,22 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import "net/http" + +// Unauthenticated returns an unauthenticated error +func Unauthenticated(scheme string) Error { + return New(http.StatusUnauthorized, "unauthenticated for %s", scheme) +} diff --git a/vendor/github.com/go-openapi/errors/doc.go b/vendor/github.com/go-openapi/errors/doc.go new file mode 100644 index 0000000000..af01190ce6 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/doc.go @@ -0,0 +1,26 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package errors provides an Error interface and several concrete types +implementing this interface to manage API errors and JSON-schema validation +errors. + +A middleware handler ServeError() is provided to serve the errors types +it defines. + +It is used throughout the various go-openapi toolkit libraries +(https://github.com/go-openapi). +*/ +package errors diff --git a/vendor/github.com/go-openapi/errors/headers.go b/vendor/github.com/go-openapi/errors/headers.go new file mode 100644 index 0000000000..6ea1151f41 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/headers.go @@ -0,0 +1,103 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import ( + "encoding/json" + "fmt" + "net/http" +) + +// Validation represents a failure of a precondition +type Validation struct { //nolint: errname + code int32 + Name string + In string + Value interface{} + message string + Values []interface{} +} + +func (e *Validation) Error() string { + return e.message +} + +// Code the error code +func (e *Validation) Code() int32 { + return e.code +} + +// MarshalJSON implements the JSON encoding interface +func (e Validation) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "code": e.code, + "message": e.message, + "in": e.In, + "name": e.Name, + "value": e.Value, + "values": e.Values, + }) +} + +// ValidateName sets the name for a validation or updates it for a nested property +func (e *Validation) ValidateName(name string) *Validation { + if name != "" { + if e.Name == "" { + e.Name = name + e.message = name + e.message + } else { + e.Name = name + "." + e.Name + e.message = name + "." 
+ e.message + } + } + return e +} + +const ( + contentTypeFail = `unsupported media type %q, only %v are allowed` + responseFormatFail = `unsupported media type requested, only %v are available` +) + +// InvalidContentType error for an invalid content type +func InvalidContentType(value string, allowed []string) *Validation { + values := make([]interface{}, 0, len(allowed)) + for _, v := range allowed { + values = append(values, v) + } + return &Validation{ + code: http.StatusUnsupportedMediaType, + Name: "Content-Type", + In: "header", + Value: value, + Values: values, + message: fmt.Sprintf(contentTypeFail, value, allowed), + } +} + +// InvalidResponseFormat error for an unacceptable response format request +func InvalidResponseFormat(value string, allowed []string) *Validation { + values := make([]interface{}, 0, len(allowed)) + for _, v := range allowed { + values = append(values, v) + } + return &Validation{ + code: http.StatusNotAcceptable, + Name: "Accept", + In: "header", + Value: value, + Values: values, + message: fmt.Sprintf(responseFormatFail, allowed), + } +} diff --git a/vendor/github.com/go-openapi/errors/middleware.go b/vendor/github.com/go-openapi/errors/middleware.go new file mode 100644 index 0000000000..67f80386a2 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/middleware.go @@ -0,0 +1,50 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import ( + "bytes" + "fmt" + "strings" +) + +// APIVerificationFailed is an error that contains all the missing info for a mismatched section +// between the api registrations and the api spec +type APIVerificationFailed struct { //nolint: errname + Section string `json:"section,omitempty"` + MissingSpecification []string `json:"missingSpecification,omitempty"` + MissingRegistration []string `json:"missingRegistration,omitempty"` +} + +func (v *APIVerificationFailed) Error() string { + buf := bytes.NewBuffer(nil) + + hasRegMissing := len(v.MissingRegistration) > 0 + hasSpecMissing := len(v.MissingSpecification) > 0 + + if hasRegMissing { + buf.WriteString(fmt.Sprintf("missing [%s] %s registrations", strings.Join(v.MissingRegistration, ", "), v.Section)) + } + + if hasRegMissing && hasSpecMissing { + buf.WriteString("\n") + } + + if hasSpecMissing { + buf.WriteString(fmt.Sprintf("missing from spec file [%s] %s", strings.Join(v.MissingSpecification, ", "), v.Section)) + } + + return buf.String() +} diff --git a/vendor/github.com/go-openapi/errors/parsing.go b/vendor/github.com/go-openapi/errors/parsing.go new file mode 100644 index 0000000000..ce1ef9cb67 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/parsing.go @@ -0,0 +1,79 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import ( + "encoding/json" + "fmt" + "net/http" +) + +// ParseError represents a parsing error +type ParseError struct { + code int32 + Name string + In string + Value string + Reason error + message string +} + +func (e *ParseError) Error() string { + return e.message +} + +// Code returns the http status code for this error +func (e *ParseError) Code() int32 { + return e.code +} + +// MarshalJSON implements the JSON encoding interface +func (e ParseError) MarshalJSON() ([]byte, error) { + var reason string + if e.Reason != nil { + reason = e.Reason.Error() + } + return json.Marshal(map[string]interface{}{ + "code": e.code, + "message": e.message, + "in": e.In, + "name": e.Name, + "value": e.Value, + "reason": reason, + }) +} + +const ( + parseErrorTemplContent = `parsing %s %s from %q failed, because %s` + parseErrorTemplContentNoIn = `parsing %s from %q failed, because %s` +) + +// NewParseError creates a new parse error +func NewParseError(name, in, value string, reason error) *ParseError { + var msg string + if in == "" { + msg = fmt.Sprintf(parseErrorTemplContentNoIn, name, value, reason) + } else { + msg = fmt.Sprintf(parseErrorTemplContent, name, in, value, reason) + } + return &ParseError{ + code: http.StatusBadRequest, + Name: name, + In: in, + Value: value, + Reason: reason, + message: msg, + } +} diff --git a/vendor/github.com/go-openapi/errors/schema.go b/vendor/github.com/go-openapi/errors/schema.go new file mode 100644 index 0000000000..8f3239dfd9 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/schema.go @@ -0,0 +1,619 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package errors
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strings"
+)
+
+const (
+	invalidType               = "%s is an invalid type name"
+	typeFail                  = "%s in %s must be of type %s"
+	typeFailWithData          = "%s in %s must be of type %s: %q"
+	typeFailWithError         = "%s in %s must be of type %s, because: %s"
+	requiredFail              = "%s in %s is required"
+	readOnlyFail              = "%s in %s is readOnly"
+	tooLongMessage            = "%s in %s should be at most %d chars long"
+	tooShortMessage           = "%s in %s should be at least %d chars long"
+	patternFail               = "%s in %s should match '%s'"
+	enumFail                  = "%s in %s should be one of %v"
+	multipleOfFail            = "%s in %s should be a multiple of %v"
+	maximumIncFail            = "%s in %s should be less than or equal to %v"
+	maximumExcFail            = "%s in %s should be less than %v"
+	minIncFail                = "%s in %s should be greater than or equal to %v"
+	minExcFail                = "%s in %s should be greater than %v"
+	uniqueFail                = "%s in %s shouldn't contain duplicates"
+	maximumItemsFail          = "%s in %s should have at most %d items"
+	minItemsFail              = "%s in %s should have at least %d items"
+	typeFailNoIn              = "%s must be of type %s"
+	typeFailWithDataNoIn      = "%s must be of type %s: %q"
+	typeFailWithErrorNoIn     = "%s must be of type %s, because: %s"
+	requiredFailNoIn          = "%s is required"
+	readOnlyFailNoIn          = "%s is readOnly"
+	tooLongMessageNoIn        = "%s should be at most %d chars long"
+	tooShortMessageNoIn       = "%s should be at least %d chars long"
+	patternFailNoIn           = "%s should match '%s'"
+	enumFailNoIn              = "%s should be one of %v"
+	multipleOfFailNoIn        = "%s should be a multiple of %v"
+	maximumIncFailNoIn        = "%s should be less than or equal to %v"
+	maximumExcFailNoIn        = "%s should be less than %v"
+	minIncFailNoIn            = "%s should be greater than or equal to %v"
+	minExcFailNoIn            = "%s should be greater than %v"
+	uniqueFailNoIn            = "%s shouldn't contain duplicates"
+	maximumItemsFailNoIn      = "%s should have at most %d items"
+	minItemsFailNoIn          = "%s should have at least %d items"
+	noAdditionalItems         = "%s in %s can't have additional items"
+	noAdditionalItemsNoIn     = "%s can't have additional items"
+	tooFewProperties          = "%s in %s should have at least %d properties"
+	tooFewPropertiesNoIn      = "%s should have at least %d properties"
+	tooManyProperties         = "%s in %s should have at most %d properties"
+	tooManyPropertiesNoIn     = "%s should have at most %d properties"
+	unallowedProperty         = "%s.%s in %s is a forbidden property"
+	unallowedPropertyNoIn     = "%s.%s is a forbidden property"
+	failedAllPatternProps     = "%s.%s in %s failed all pattern properties"
+	failedAllPatternPropsNoIn = "%s.%s failed all pattern properties"
+	multipleOfMustBePositive  = "factor MultipleOf declared for %s must be positive: %v"
+)
+
+const maximumValidHTTPCode = 600
+
+// All code responses can be used to differentiate errors for different handling
+// by the consuming program
+const (
+	// CompositeErrorCode remains 422 for backwards-compatibility
+	// and to separate it from validation errors with cause
+	CompositeErrorCode = http.StatusUnprocessableEntity
+
+	// InvalidTypeCode is used for any subclass of invalid types
+	InvalidTypeCode = maximumValidHTTPCode + iota
+	RequiredFailCode
+	TooLongFailCode
+	TooShortFailCode
+	PatternFailCode
+	EnumFailCode
+	MultipleOfFailCode
+	MaxFailCode
+	MinFailCode
+	UniqueFailCode
+	MaxItemsFailCode
+	MinItemsFailCode
+	NoAdditionalItemsCode
+	TooFewPropertiesCode
+	TooManyPropertiesCode
+	UnallowedPropertyCode
+	FailedAllPatternPropsCode
+	MultipleOfMustBePositiveCode
+	ReadOnlyFailCode
+)
+
+// CompositeError is an error that groups several errors together
+type CompositeError struct {
+	Errors  []error
+	code    int32
+	message string
+}
+
+// Code for this error
+func (c *CompositeError) Code() int32 {
+	return c.code
+}
+
+func (c *CompositeError) Error() string {
+	if len(c.Errors) > 0 {
+		msgs := []string{c.message + ":"}
+		for _, e := range c.Errors {
+			msgs = append(msgs, e.Error())
+		}
+		return strings.Join(msgs, "\n")
+	}
+	return c.message
+}
+
+func (c *CompositeError) Unwrap() []error {
+	return c.Errors
+}
+
+// MarshalJSON implements the JSON encoding interface
+func (c CompositeError) MarshalJSON() ([]byte, error) {
+	return json.Marshal(map[string]interface{}{
+		"code":    c.code,
+		"message": c.message,
+		"errors":  c.Errors,
+	})
+}
+
+// CompositeValidationError an error to wrap a bunch of other errors
+func CompositeValidationError(errors ...error) *CompositeError {
+	return &CompositeError{
+		code:    CompositeErrorCode,
+		Errors:  append(make([]error, 0, len(errors)), errors...),
+		message: "validation failure list",
+	}
+}
+
+// ValidateName recursively sets the name for all validations or updates them for nested properties
+func (c *CompositeError) ValidateName(name string) *CompositeError {
+	for i, e := range c.Errors {
+		if ve, ok := e.(*Validation); ok {
+			c.Errors[i] = ve.ValidateName(name)
+		} else if ce, ok := e.(*CompositeError); ok {
+			c.Errors[i] = ce.ValidateName(name)
+		}
+	}
+
+	return c
+}
+
+// FailedAllPatternProperties an error for when the property doesn't match a pattern
+func FailedAllPatternProperties(name, in, key string) *Validation {
+	msg := fmt.Sprintf(failedAllPatternProps, name, key, in)
+	if in == "" {
+		msg = fmt.Sprintf(failedAllPatternPropsNoIn, name, key)
+	}
+	return &Validation{
+		code:    FailedAllPatternPropsCode,
+		Name:    name,
+		In:      in,
+		Value:   key,
+		message: msg,
+	}
+}
+
+// PropertyNotAllowed an error for when a property is not allowed
+func PropertyNotAllowed(name, in, key string) *Validation {
+	msg := fmt.Sprintf(unallowedProperty, name, key, in)
+	if in == "" {
+		msg = fmt.Sprintf(unallowedPropertyNoIn, name, key)
+	}
+	return &Validation{
+		code:    UnallowedPropertyCode,
+		Name:    name,
+		In:      in,
+		Value:   key,
+		message: msg,
+	}
+}
+
+// TooFewProperties an error for an object with too few properties
+func TooFewProperties(name, in string, n int64) *Validation {
+	msg := fmt.Sprintf(tooFewProperties, name, in, n)
+	if in == "" {
+		msg = fmt.Sprintf(tooFewPropertiesNoIn, name, n)
+	}
+	return &Validation{
+		code:    TooFewPropertiesCode,
+		Name:    name,
+		In:      in,
+		Value:   n,
+		message: msg,
+	}
+}
+
+// TooManyProperties an error for an object with too many properties
+func TooManyProperties(name, in string, n int64) *Validation {
+	msg := fmt.Sprintf(tooManyProperties, name, in, n)
+	if in == "" {
+		msg = fmt.Sprintf(tooManyPropertiesNoIn, name, n)
+	}
+	return &Validation{
+		code:    TooManyPropertiesCode,
+		Name:    name,
+		In:      in,
+		Value:   n,
+		message: msg,
+	}
+}
+
+// AdditionalItemsNotAllowed an error for invalid additional items
+func AdditionalItemsNotAllowed(name, in string) *Validation {
+	msg := fmt.Sprintf(noAdditionalItems, name, in)
+	if in == "" {
+		msg = fmt.Sprintf(noAdditionalItemsNoIn, name)
+	}
+	return &Validation{
+		code:    NoAdditionalItemsCode,
+		Name:    name,
+		In:      in,
+		message: msg,
+	}
+}
+
+// InvalidCollectionFormat another flavor of invalid type error
+func InvalidCollectionFormat(name, in, format string) *Validation {
+	return &Validation{
+		code:    InvalidTypeCode,
+		Name:    name,
+		In:      in,
+		Value:   format,
+		message: fmt.Sprintf("the collection format %q is not supported for the %s param %q", format, in, name),
+	}
+}
+
+// InvalidTypeName an error for when the type is invalid
+func InvalidTypeName(typeName string) *Validation {
+	return &Validation{
+		code:    InvalidTypeCode,
+		Value:   typeName,
+		message: fmt.Sprintf(invalidType, typeName),
+	}
+}
+
+// InvalidType creates an error for when the type is invalid
+func InvalidType(name, in, typeName string, value interface{}) *Validation {
+	var message string
+
+	if in != "" {
+		switch value.(type) {
+		case string:
+			message = fmt.Sprintf(typeFailWithData, name, in, typeName, value)
+		case error:
+			message = fmt.Sprintf(typeFailWithError, name, in, typeName, value)
+		default:
+			message = fmt.Sprintf(typeFail, name, in, typeName)
+		}
+	} else {
+		switch value.(type) {
+		case string:
+			message = fmt.Sprintf(typeFailWithDataNoIn, name, typeName, value)
+		case error:
+			message = fmt.Sprintf(typeFailWithErrorNoIn, name, typeName, value)
+		default:
+			message = fmt.Sprintf(typeFailNoIn, name, typeName)
+		}
+	}
+
+	return &Validation{
+		code:    InvalidTypeCode,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		message: message,
+	}
+
+}
+
+// DuplicateItems error for when an array contains duplicates
+func DuplicateItems(name, in string) *Validation {
+	msg := fmt.Sprintf(uniqueFail, name, in)
+	if in == "" {
+		msg = fmt.Sprintf(uniqueFailNoIn, name)
+	}
+	return &Validation{
+		code:    UniqueFailCode,
+		Name:    name,
+		In:      in,
+		message: msg,
+	}
+}
+
+// TooManyItems error for when an array contains too many items
+func TooManyItems(name, in string, maximum int64, value interface{}) *Validation {
+	msg := fmt.Sprintf(maximumItemsFail, name, in, maximum)
+	if in == "" {
+		msg = fmt.Sprintf(maximumItemsFailNoIn, name, maximum)
+	}
+
+	return &Validation{
+		code:    MaxItemsFailCode,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		message: msg,
+	}
+}
+
+// TooFewItems error for when an array contains too few items
+func TooFewItems(name, in string, minimum int64, value interface{}) *Validation {
+	msg := fmt.Sprintf(minItemsFail, name, in, minimum)
+	if in == "" {
+		msg = fmt.Sprintf(minItemsFailNoIn, name, minimum)
+	}
+	return &Validation{
+		code:    MinItemsFailCode,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		message: msg,
+	}
+}
+
+// ExceedsMaximumInt error for when maximum validation fails
+func ExceedsMaximumInt(name, in string, maximum int64, exclusive bool, value interface{}) *Validation {
+	var message string
+	if in == "" {
+		m := maximumIncFailNoIn
+		if exclusive {
+			m = maximumExcFailNoIn
+		}
+		message = fmt.Sprintf(m, name, maximum)
+	} else {
+		m := maximumIncFail
+		if exclusive {
+			m = maximumExcFail
+		}
+		message = fmt.Sprintf(m, name, in, maximum)
+	}
+	return &Validation{
+		code:    MaxFailCode,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		message: message,
+	}
+}
+
+// ExceedsMaximumUint error for when maximum validation fails
+func ExceedsMaximumUint(name, in string, maximum uint64, exclusive bool, value interface{}) *Validation {
+	var message string
+	if in == "" {
+		m := maximumIncFailNoIn
+		if exclusive {
+			m = maximumExcFailNoIn
+		}
+		message = fmt.Sprintf(m, name, maximum)
+	} else {
+		m := maximumIncFail
+		if exclusive {
+			m = maximumExcFail
+		}
+		message = fmt.Sprintf(m, name, in, maximum)
+	}
+	return &Validation{
+		code:    MaxFailCode,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		message: message,
+	}
+}
+
+// ExceedsMaximum error for when maximum validation fails
+func ExceedsMaximum(name, in string, maximum float64, exclusive bool, value interface{}) *Validation {
+	var message string
+ if in == "" { + m := maximumIncFailNoIn + if exclusive { + m = maximumExcFailNoIn + } + message = fmt.Sprintf(m, name, maximum) + } else { + m := maximumIncFail + if exclusive { + m = maximumExcFail + } + message = fmt.Sprintf(m, name, in, maximum) + } + return &Validation{ + code: MaxFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// ExceedsMinimumInt error for when minimum validation fails +func ExceedsMinimumInt(name, in string, minimum int64, exclusive bool, value interface{}) *Validation { + var message string + if in == "" { + m := minIncFailNoIn + if exclusive { + m = minExcFailNoIn + } + message = fmt.Sprintf(m, name, minimum) + } else { + m := minIncFail + if exclusive { + m = minExcFail + } + message = fmt.Sprintf(m, name, in, minimum) + } + return &Validation{ + code: MinFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// ExceedsMinimumUint error for when minimum validation fails +func ExceedsMinimumUint(name, in string, minimum uint64, exclusive bool, value interface{}) *Validation { + var message string + if in == "" { + m := minIncFailNoIn + if exclusive { + m = minExcFailNoIn + } + message = fmt.Sprintf(m, name, minimum) + } else { + m := minIncFail + if exclusive { + m = minExcFail + } + message = fmt.Sprintf(m, name, in, minimum) + } + return &Validation{ + code: MinFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// ExceedsMinimum error for when minimum validation fails +func ExceedsMinimum(name, in string, minimum float64, exclusive bool, value interface{}) *Validation { + var message string + if in == "" { + m := minIncFailNoIn + if exclusive { + m = minExcFailNoIn + } + message = fmt.Sprintf(m, name, minimum) + } else { + m := minIncFail + if exclusive { + m = minExcFail + } + message = fmt.Sprintf(m, name, in, minimum) + } + return &Validation{ + code: MinFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// NotMultipleOf error for when multiple of validation fails +func NotMultipleOf(name, in string, multiple, value interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(multipleOfFailNoIn, name, multiple) + } else { + msg = fmt.Sprintf(multipleOfFail, name, in, multiple) + } + return &Validation{ + code: MultipleOfFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// EnumFail error for when an enum validation fails +func EnumFail(name, in string, value interface{}, values []interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(enumFailNoIn, name, values) + } else { + msg = fmt.Sprintf(enumFail, name, in, values) + } + + return &Validation{ + code: EnumFailCode, + Name: name, + In: in, + Value: value, + Values: values, + message: msg, + } +} + +// Required error for when a value is missing +func Required(name, in string, value interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(requiredFailNoIn, name) + } else { + msg = fmt.Sprintf(requiredFail, name, in) + } + return &Validation{ + code: RequiredFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// ReadOnly error for when a value is present in request +func ReadOnly(name, in string, value interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(readOnlyFailNoIn, name) + } else { + msg = fmt.Sprintf(readOnlyFail, name, in) + } + return &Validation{ + code: ReadOnlyFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// 
TooLong error for when a string is too long +func TooLong(name, in string, maximum int64, value interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(tooLongMessageNoIn, name, maximum) + } else { + msg = fmt.Sprintf(tooLongMessage, name, in, maximum) + } + return &Validation{ + code: TooLongFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// TooShort error for when a string is too short +func TooShort(name, in string, minimum int64, value interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(tooShortMessageNoIn, name, minimum) + } else { + msg = fmt.Sprintf(tooShortMessage, name, in, minimum) + } + + return &Validation{ + code: TooShortFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// FailedPattern error for when a string fails a regex pattern match +// the pattern that is returned is the ECMA syntax version of the pattern not the golang version. +func FailedPattern(name, in, pattern string, value interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(patternFailNoIn, name, pattern) + } else { + msg = fmt.Sprintf(patternFail, name, in, pattern) + } + + return &Validation{ + code: PatternFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// MultipleOfMustBePositive error for when a +// multipleOf factor is negative +func MultipleOfMustBePositive(name, in string, factor interface{}) *Validation { + return &Validation{ + code: MultipleOfMustBePositiveCode, + Name: name, + In: in, + Value: factor, + message: fmt.Sprintf(multipleOfMustBePositive, name, factor), + } +} diff --git a/vendor/github.com/go-openapi/loads/.editorconfig b/vendor/github.com/go-openapi/loads/.editorconfig new file mode 100644 index 0000000000..3152da69a5 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/loads/.gitignore b/vendor/github.com/go-openapi/loads/.gitignore new file mode 100644 index 0000000000..e4f15f17bf --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.gitignore @@ -0,0 +1,4 @@ +secrets.yml +coverage.out +profile.cov +profile.out diff --git a/vendor/github.com/go-openapi/loads/.golangci.yml b/vendor/github.com/go-openapi/loads/.golangci.yml new file mode 100644 index 0000000000..22f8d21cca --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.golangci.yml @@ -0,0 +1,61 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 45 + maligned: + suggest-new: true + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + +linters: + enable-all: true + disable: + - maligned + - unparam + - lll + - gochecknoinits + - gochecknoglobals + - funlen + - godox + - gocognit + - whitespace + - wsl + - wrapcheck + - testpackage + - nlreturn + - gomnd + - exhaustivestruct + - goerr113 + - errorlint + - nestif + - godot + - gofumpt + - paralleltest + - tparallel 
+ - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/loads/.travis.yml b/vendor/github.com/go-openapi/loads/.travis.yml new file mode 100644 index 0000000000..cd4a7c331b --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.travis.yml @@ -0,0 +1,25 @@ +after_success: +- bash <(curl -s https://codecov.io/bash) +go: +- 1.16.x +- 1.x +install: +- go get gotest.tools/gotestsum +language: go +arch: +- amd64 +- ppc64le +jobs: + include: + # include linting job, but only for latest go version and amd64 arch + - go: 1.x + arch: amd64 + install: + go get github.com/golangci/golangci-lint/cmd/golangci-lint + script: + - golangci-lint run --new-from-rev master +notifications: + slack: + secure: OxkPwVp35qBTUilgWC8xykSj+sGMcj0h8IIOKD+Rflx2schZVlFfdYdyVBM+s9OqeOfvtuvnR9v1Ye2rPKAvcjWdC4LpRGUsgmItZaI6Um8Aj6+K9udCw5qrtZVfOVmRu8LieH//XznWWKdOultUuniW0MLqw5+II87Gd00RWbCGi0hk0PykHe7uK+PDA2BEbqyZ2WKKYCvfB3j+0nrFOHScXqnh0V05l2E83J4+Sgy1fsPy+1WdX58ZlNBG333ibaC1FS79XvKSmTgKRkx3+YBo97u6ZtUmJa5WZjf2OdLG3KIckGWAv6R5xgxeU31N0Ng8L332w/Edpp2O/M2bZwdnKJ8hJQikXIAQbICbr+lTDzsoNzMdEIYcHpJ5hjPbiUl3Bmd+Jnsjf5McgAZDiWIfpCKZ29tPCEkVwRsOCqkyPRMNMzHHmoja495P5jR+ODS7+J8RFg5xgcnOgpP9D4Wlhztlf5WyZMpkLxTUD+bZq2SRf50HfHFXTkfq22zPl3d1eq0yrLwh/Z/fWKkfb6SyysROL8y6s8u3dpFX1YHSg0BR6i913h4aoZw9B2BG27cafLLTwKYsp2dFo1PWl4O6u9giFJIeqwloZHLKKrwh0cBFhB7RH0I58asxkZpCH6uWjJierahmHe7iS+E6i+9oCHkOZ59hmCYNimIs3hM= +script: +- gotestsum -f short-verbose -- -race -timeout=20m -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..9322b065e3 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/loads/LICENSE b/vendor/github.com/go-openapi/loads/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/loads/README.md b/vendor/github.com/go-openapi/loads/README.md new file mode 100644 index 0000000000..f8bd440dfc --- /dev/null +++ b/vendor/github.com/go-openapi/loads/README.md @@ -0,0 +1,6 @@ +# Loads OAI specs [![Build Status](https://github.com/go-openapi/loads/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/loads/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/loads/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/loads) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/loads)](https://goreportcard.com/report/github.com/go-openapi/loads) + +Loading of OAI specification documents from local or remote locations. Supports JSON and YAML documents. diff --git a/vendor/github.com/go-openapi/loads/doc.go b/vendor/github.com/go-openapi/loads/doc.go new file mode 100644 index 0000000000..5bcaef5dbc --- /dev/null +++ b/vendor/github.com/go-openapi/loads/doc.go @@ -0,0 +1,18 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package loads provides document loading methods for swagger (OAI) specifications. +// +// It is used by other go-openapi packages to load and run analysis on local or remote spec documents. +package loads diff --git a/vendor/github.com/go-openapi/loads/loaders.go b/vendor/github.com/go-openapi/loads/loaders.go new file mode 100644 index 0000000000..b2d1e034c5 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/loaders.go @@ -0,0 +1,133 @@ +package loads + +import ( + "encoding/json" + "errors" + "net/url" + + "github.com/go-openapi/spec" + "github.com/go-openapi/swag" +) + +var ( + // Default chain of loaders, defined at the package level. + // + // By default this matches json and yaml documents. + // + // May be altered with AddLoader(). + loaders *loader +) + +func init() { + jsonLoader := &loader{ + DocLoaderWithMatch: DocLoaderWithMatch{ + Match: func(_ string) bool { + return true + }, + Fn: JSONDoc, + }, + } + + loaders = jsonLoader.WithHead(&loader{ + DocLoaderWithMatch: DocLoaderWithMatch{ + Match: swag.YAMLMatcher, + Fn: swag.YAMLDoc, + }, + }) + + // sets the global default loader for go-openapi/spec + spec.PathLoader = loaders.Load +} + +// DocLoader represents a doc loader type +type DocLoader func(string) (json.RawMessage, error) + +// DocMatcher represents a predicate to check if a loader matches +type DocMatcher func(string) bool + +// DocLoaderWithMatch describes a loading function for a given extension match. 
+type DocLoaderWithMatch struct { + Fn DocLoader + Match DocMatcher +} + +// NewDocLoaderWithMatch builds a DocLoaderWithMatch to be used in load options +func NewDocLoaderWithMatch(fn DocLoader, matcher DocMatcher) DocLoaderWithMatch { + return DocLoaderWithMatch{ + Fn: fn, + Match: matcher, + } +} + +type loader struct { + DocLoaderWithMatch + Next *loader +} + +// WithHead adds a loader at the head of the current stack +func (l *loader) WithHead(head *loader) *loader { + if head == nil { + return l + } + head.Next = l + return head +} + +// WithNext adds a loader at the tail of the current stack +func (l *loader) WithNext(next *loader) *loader { + l.Next = next + return next +} + +// Load the raw document from path +func (l *loader) Load(path string) (json.RawMessage, error) { + _, erp := url.Parse(path) + if erp != nil { + return nil, erp + } + + lastErr := errors.New("no loader matched") // default error if no match was found + for ldr := l; ldr != nil; ldr = ldr.Next { + if ldr.Match != nil && !ldr.Match(path) { + continue + } + + // try then move to next one if there is an error + b, err := ldr.Fn(path) + if err == nil { + return b, nil + } + + lastErr = err + } + + return nil, lastErr +} + +// JSONDoc loads a json document from either a file or a remote url +func JSONDoc(path string) (json.RawMessage, error) { + data, err := swag.LoadFromFileOrHTTP(path) + if err != nil { + return nil, err + } + return json.RawMessage(data), nil +} + +// AddLoader for a document, executed before other previously set loaders. +// +// This sets the configuration at the package level. +// +// NOTE: +// - this updates the default loader used by github.com/go-openapi/spec +// - since this sets package level globals, you shouldn't call this concurrently +func AddLoader(predicate DocMatcher, load DocLoader) { + loaders = loaders.WithHead(&loader{ + DocLoaderWithMatch: DocLoaderWithMatch{ + Match: predicate, + Fn: load, + }, + }) + + // sets the global default loader for go-openapi/spec + spec.PathLoader = loaders.Load +} diff --git a/vendor/github.com/go-openapi/loads/options.go b/vendor/github.com/go-openapi/loads/options.go new file mode 100644 index 0000000000..f8305d5607 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/options.go @@ -0,0 +1,61 @@ +package loads + +type options struct { + loader *loader +} + +func defaultOptions() *options { + return &options{ + loader: loaders, + } +} + +func loaderFromOptions(options []LoaderOption) *loader { + opts := defaultOptions() + for _, apply := range options { + apply(opts) + } + + return opts.loader +} + +// LoaderOption allows callers to fine-tune the spec loader behavior +type LoaderOption func(*options) + +// WithDocLoader sets a custom loader for loading specs +func WithDocLoader(l DocLoader) LoaderOption { + return func(opt *options) { + if l == nil { + return + } + opt.loader = &loader{ + DocLoaderWithMatch: DocLoaderWithMatch{ + Fn: l, + }, + } + } +} + +// WithDocLoaderMatches sets a chain of custom loaders for loading specs +// for different extension matches. +// +// Loaders are executed in the order of provided DocLoaderWithMatch'es.
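For illustration, a minimal sketch (not part of the vendored file) of composing these options with the package's Spec function shown further down; the spec path is hypothetical:

    package main

    import (
        "fmt"
        "log"
        "strings"

        "github.com/go-openapi/loads"
        "github.com/go-openapi/swag"
    )

    func main() {
        // Try a JSON loader for *.json paths first, then fall back to YAML
        // (loaders run in the order given).
        doc, err := loads.Spec("./petstore.json", // hypothetical path
            loads.WithDocLoaderMatches(
                loads.NewDocLoaderWithMatch(loads.JSONDoc, func(p string) bool {
                    return strings.HasSuffix(p, ".json")
                }),
                loads.NewDocLoaderWithMatch(swag.YAMLDoc, swag.YAMLMatcher),
            ))
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("swagger version:", doc.Version())
    }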
+func WithDocLoaderMatches(l ...DocLoaderWithMatch) LoaderOption { + return func(opt *options) { + var final, prev *loader + for _, ldr := range l { + if ldr.Fn == nil { + continue + } + + if prev == nil { + final = &loader{DocLoaderWithMatch: ldr} + prev = final + continue + } + + prev = prev.WithNext(&loader{DocLoaderWithMatch: ldr}) + } + opt.loader = final + } +} diff --git a/vendor/github.com/go-openapi/loads/spec.go b/vendor/github.com/go-openapi/loads/spec.go new file mode 100644 index 0000000000..c9039cd5d7 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/spec.go @@ -0,0 +1,275 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package loads + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "fmt" + + "github.com/go-openapi/analysis" + "github.com/go-openapi/spec" + "github.com/go-openapi/swag" +) + +func init() { + gob.Register(map[string]interface{}{}) + gob.Register([]interface{}{}) +} + +// Document represents a swagger spec document +type Document struct { + // specAnalyzer + Analyzer *analysis.Spec + spec *spec.Swagger + specFilePath string + origSpec *spec.Swagger + schema *spec.Schema + pathLoader *loader + raw json.RawMessage +} + +// JSONSpec loads a spec from a json document +func JSONSpec(path string, options ...LoaderOption) (*Document, error) { + data, err := JSONDoc(path) + if err != nil { + return nil, err + } + // convert to json + doc, err := Analyzed(data, "", options...) + if err != nil { + return nil, err + } + + doc.specFilePath = path + + return doc, nil +} + +// Embedded returns a Document based on embedded specs. No analysis is required +func Embedded(orig, flat json.RawMessage, options ...LoaderOption) (*Document, error) { + var origSpec, flatSpec spec.Swagger + if err := json.Unmarshal(orig, &origSpec); err != nil { + return nil, err + } + if err := json.Unmarshal(flat, &flatSpec); err != nil { + return nil, err + } + return &Document{ + raw: orig, + origSpec: &origSpec, + spec: &flatSpec, + pathLoader: loaderFromOptions(options), + }, nil +} + +// Spec loads a new spec document from a local or remote path +func Spec(path string, options ...LoaderOption) (*Document, error) { + ldr := loaderFromOptions(options) + + b, err := ldr.Load(path) + if err != nil { + return nil, err + } + + document, err := Analyzed(b, "", options...) + if err != nil { + return nil, err + } + + document.specFilePath = path + document.pathLoader = ldr + + return document, nil +} + +// Analyzed creates a new analyzed spec document for a root json.RawMessage. 
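A sketch of the intended use of Analyzed, defined next (the raw document here is inlined for illustration):

    package main

    import (
        "encoding/json"
        "fmt"
        "log"

        "github.com/go-openapi/loads"
    )

    func main() {
        raw := json.RawMessage(`{"swagger":"2.0","info":{"title":"minimal","version":"1.0.0"},"paths":{}}`)
        // An empty version argument defaults to "2.0", the only supported version.
        doc, err := loads.Analyzed(raw, "")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(doc.Version()) // prints: 2.0
    }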
+func Analyzed(data json.RawMessage, version string, options ...LoaderOption) (*Document, error) { + if version == "" { + version = "2.0" + } + if version != "2.0" { + return nil, fmt.Errorf("spec version %q is not supported", version) + } + + raw, err := trimData(data) // trim blanks, then convert yaml docs into json + if err != nil { + return nil, err + } + + swspec := new(spec.Swagger) + if err = json.Unmarshal(raw, swspec); err != nil { + return nil, err + } + + origsqspec, err := cloneSpec(swspec) + if err != nil { + return nil, err + } + + d := &Document{ + Analyzer: analysis.New(swspec), // NOTE: at this moment, analysis does not follow $refs to documents outside the root doc + schema: spec.MustLoadSwagger20Schema(), + spec: swspec, + raw: raw, + origSpec: origsqspec, + pathLoader: loaderFromOptions(options), + } + + return d, nil +} + +func trimData(in json.RawMessage) (json.RawMessage, error) { + trimmed := bytes.TrimSpace(in) + if len(trimmed) == 0 { + return in, nil + } + + if trimmed[0] == '{' || trimmed[0] == '[' { + return trimmed, nil + } + + // assume yaml doc: convert it to json + yml, err := swag.BytesToYAMLDoc(trimmed) + if err != nil { + return nil, fmt.Errorf("analyzed: %v", err) + } + + d, err := swag.YAMLToJSON(yml) + if err != nil { + return nil, fmt.Errorf("analyzed: %v", err) + } + + return d, nil +} + +// Expanded expands the $ref fields in the spec document and returns a new spec document +func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) { + swspec := new(spec.Swagger) + if err := json.Unmarshal(d.raw, swspec); err != nil { + return nil, err + } + + var expandOptions *spec.ExpandOptions + if len(options) > 0 { + expandOptions = options[0] + if expandOptions.RelativeBase == "" { + expandOptions.RelativeBase = d.specFilePath + } + } else { + expandOptions = &spec.ExpandOptions{ + RelativeBase: d.specFilePath, + } + } + + if expandOptions.PathLoader == nil { + if d.pathLoader != nil { + // use loader from Document options + expandOptions.PathLoader = d.pathLoader.Load + } else { + // use package level loader + expandOptions.PathLoader = loaders.Load + } + } + + if err := spec.ExpandSpec(swspec, expandOptions); err != nil { + return nil, err + } + + dd := &Document{ + Analyzer: analysis.New(swspec), + spec: swspec, + specFilePath: d.specFilePath, + schema: spec.MustLoadSwagger20Schema(), + raw: d.raw, + origSpec: d.origSpec, + } + return dd, nil +} + +// BasePath the base path for the API specified by this spec +func (d *Document) BasePath() string { + return d.spec.BasePath +} + +// Version returns the version of this spec +func (d *Document) Version() string { + return d.spec.Swagger +} + +// Schema returns the swagger 2.0 schema +func (d *Document) Schema() *spec.Schema { + return d.schema +} + +// Spec returns the swagger spec object model +func (d *Document) Spec() *spec.Swagger { + return d.spec +} + +// Host returns the host for the API +func (d *Document) Host() string { + return d.spec.Host +} + +// Raw returns the raw swagger spec as json bytes +func (d *Document) Raw() json.RawMessage { + return d.raw +} + +// OrigSpec yields the original spec +func (d *Document) OrigSpec() *spec.Swagger { + return d.origSpec +} + +// ResetDefinitions gives a shallow copy with the models reset to the original spec +func (d *Document) ResetDefinitions() *Document { + defs := make(map[string]spec.Schema, len(d.origSpec.Definitions)) + for k, v := range d.origSpec.Definitions { + defs[k] = v + } + + d.spec.Definitions = defs + return d +} + +// 
Pristine creates a new pristine document instance based on the input data +func (d *Document) Pristine() *Document { + raw, _ := json.Marshal(d.Spec()) + dd, _ := Analyzed(raw, d.Version()) + dd.pathLoader = d.pathLoader + dd.specFilePath = d.specFilePath + + return dd +} + +// SpecFilePath returns the file path of the spec if one is defined +func (d *Document) SpecFilePath() string { + return d.specFilePath +} + +func cloneSpec(src *spec.Swagger) (*spec.Swagger, error) { + var b bytes.Buffer + if err := gob.NewEncoder(&b).Encode(src); err != nil { + return nil, err + } + + var dst spec.Swagger + if err := gob.NewDecoder(&b).Decode(&dst); err != nil { + return nil, err + } + return &dst, nil +} diff --git a/vendor/github.com/go-openapi/runtime/.editorconfig b/vendor/github.com/go-openapi/runtime/.editorconfig new file mode 100644 index 0000000000..3152da69a5 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/runtime/.gitattributes b/vendor/github.com/go-openapi/runtime/.gitattributes new file mode 100644 index 0000000000..d207b1802b --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/.gitattributes @@ -0,0 +1 @@ +*.go text eol=lf diff --git a/vendor/github.com/go-openapi/runtime/.gitignore b/vendor/github.com/go-openapi/runtime/.gitignore new file mode 100644 index 0000000000..fea8b84eca --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/.gitignore @@ -0,0 +1,5 @@ +secrets.yml +coverage.out +*.cov +*.out +playground diff --git a/vendor/github.com/go-openapi/runtime/.golangci.yml b/vendor/github.com/go-openapi/runtime/.golangci.yml new file mode 100644 index 0000000000..1c75557bac --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/.golangci.yml @@ -0,0 +1,62 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 45 + maligned: + suggest-new: true + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + +linters: + enable-all: true + disable: + - nilerr # nilerr crashes on this repo + - maligned + - unparam + - lll + - gochecknoinits + - gochecknoglobals + - funlen + - godox + - gocognit + - whitespace + - wsl + - wrapcheck + - testpackage + - nlreturn + - gomnd + - exhaustivestruct + - goerr113 + - errorlint + - nestif + - godot + - gofumpt + - paralleltest + - tparallel + - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..9322b065e3 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of 
Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/runtime/LICENSE b/vendor/github.com/go-openapi/runtime/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/runtime/README.md b/vendor/github.com/go-openapi/runtime/README.md new file mode 100644 index 0000000000..b07e0ad9d6 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/README.md @@ -0,0 +1,10 @@ +# runtime [![Build Status](https://github.com/go-openapi/runtime/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/runtime/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/runtime/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/runtime) + +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/runtime/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/runtime.svg)](https://pkg.go.dev/github.com/go-openapi/runtime) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/runtime)](https://goreportcard.com/report/github.com/go-openapi/runtime) + +# go OpenAPI toolkit runtime + +The runtime component for use in code generation or as untyped usage. diff --git a/vendor/github.com/go-openapi/runtime/bytestream.go b/vendor/github.com/go-openapi/runtime/bytestream.go new file mode 100644 index 0000000000..f8fb482232 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/bytestream.go @@ -0,0 +1,222 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package runtime + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "io" + "reflect" + + "github.com/go-openapi/swag" +) + +func defaultCloser() error { return nil } + +type byteStreamOpt func(opts *byteStreamOpts) + +// ClosesStream when the bytestream consumer or producer is finished +func ClosesStream(opts *byteStreamOpts) { + opts.Close = true +} + +type byteStreamOpts struct { + Close bool +} + +// ByteStreamConsumer creates a consumer for byte streams. +// +// The consumer consumes from a provided reader into the data passed by reference. +// +// Supported output underlying types and interfaces, prioritized in this order: +// - io.ReaderFrom (for maximum control) +// - io.Writer (performs io.Copy) +// - encoding.BinaryUnmarshaler +// - *string +// - *[]byte +func ByteStreamConsumer(opts ...byteStreamOpt) Consumer { + var vals byteStreamOpts + for _, opt := range opts { + opt(&vals) + } + + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + if reader == nil { + return errors.New("ByteStreamConsumer requires a reader") // early exit + } + if data == nil { + return errors.New("nil destination for ByteStreamConsumer") + } + + closer := defaultCloser + if vals.Close { + if cl, isReaderCloser := reader.(io.Closer); isReaderCloser { + closer = cl.Close + } + } + defer func() { + _ = closer() + }() + + if readerFrom, isReaderFrom := data.(io.ReaderFrom); isReaderFrom { + _, err := readerFrom.ReadFrom(reader) + return err + } + + if writer, isDataWriter := data.(io.Writer); isDataWriter { + _, err := io.Copy(writer, reader) + return err + } + + // buffers input before writing to data + var buf bytes.Buffer + _, err := buf.ReadFrom(reader) + if err != nil { + return err + } + b := buf.Bytes() + + switch destinationPointer := data.(type) { + case encoding.BinaryUnmarshaler: + return destinationPointer.UnmarshalBinary(b) + case *any: + switch (*destinationPointer).(type) { + case string: + *destinationPointer = string(b) + + return nil + + case []byte: + *destinationPointer = b + + return nil + } + default: + // check for the underlying type to be pointer to []byte or string, + if ptr := reflect.TypeOf(data); ptr.Kind() != reflect.Ptr { + return errors.New("destination must be a pointer") + } + + v := reflect.Indirect(reflect.ValueOf(data)) + t := v.Type() + + switch { + case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8: + v.SetBytes(b) + return nil + + case t.Kind() == reflect.String: + v.SetString(string(b)) + return nil + } + } + + return fmt.Errorf("%v (%T) is not supported by the ByteStreamConsumer, %s", + data, data, "can be resolved by supporting Writer/BinaryUnmarshaler interface") + }) +} + +// ByteStreamProducer creates a producer for byte streams. +// +// The producer takes input data then writes to an output writer (essentially as a pipe). +// +// Supported input underlying types and interfaces, prioritized in this order: +// - io.WriterTo (for maximum control) +// - io.Reader (performs io.Copy). A ReadCloser is closed before exiting. 
+// - encoding.BinaryMarshaler +// - error (writes as a string) +// - []byte +// - string +// - struct, other slices: writes as JSON +func ByteStreamProducer(opts ...byteStreamOpt) Producer { + var vals byteStreamOpts + for _, opt := range opts { + opt(&vals) + } + + return ProducerFunc(func(writer io.Writer, data interface{}) error { + if writer == nil { + return errors.New("ByteStreamProducer requires a writer") // early exit + } + if data == nil { + return errors.New("nil data for ByteStreamProducer") + } + + closer := defaultCloser + if vals.Close { + if cl, isWriterCloser := writer.(io.Closer); isWriterCloser { + closer = cl.Close + } + } + defer func() { + _ = closer() + }() + + if rc, isDataCloser := data.(io.ReadCloser); isDataCloser { + defer rc.Close() + } + + switch origin := data.(type) { + case io.WriterTo: + _, err := origin.WriteTo(writer) + return err + + case io.Reader: + _, err := io.Copy(writer, origin) + return err + + case encoding.BinaryMarshaler: + bytes, err := origin.MarshalBinary() + if err != nil { + return err + } + + _, err = writer.Write(bytes) + return err + + case error: + _, err := writer.Write([]byte(origin.Error())) + return err + + default: + v := reflect.Indirect(reflect.ValueOf(data)) + t := v.Type() + + switch { + case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8: + _, err := writer.Write(v.Bytes()) + return err + + case t.Kind() == reflect.String: + _, err := writer.Write([]byte(v.String())) + return err + + case t.Kind() == reflect.Struct || t.Kind() == reflect.Slice: + b, err := swag.WriteJSON(data) + if err != nil { + return err + } + + _, err = writer.Write(b) + return err + } + } + + return fmt.Errorf("%v (%T) is not supported by the ByteStreamProducer, %s", + data, data, "can be resolved by supporting Reader/BinaryMarshaler interface") + }) +} diff --git a/vendor/github.com/go-openapi/runtime/client_auth_info.go b/vendor/github.com/go-openapi/runtime/client_auth_info.go new file mode 100644 index 0000000000..c6c97d9a7c --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client_auth_info.go @@ -0,0 +1,30 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package runtime + +import "github.com/go-openapi/strfmt" + +// A ClientAuthInfoWriterFunc converts a function to a request writer interface +type ClientAuthInfoWriterFunc func(ClientRequest, strfmt.Registry) error + +// AuthenticateRequest adds authentication data to the request +func (fn ClientAuthInfoWriterFunc) AuthenticateRequest(req ClientRequest, reg strfmt.Registry) error { + return fn(req, reg) +} + +// A ClientAuthInfoWriter implementor knows how to write authentication info to a request +type ClientAuthInfoWriter interface { + AuthenticateRequest(ClientRequest, strfmt.Registry) error +} diff --git a/vendor/github.com/go-openapi/runtime/client_operation.go b/vendor/github.com/go-openapi/runtime/client_operation.go new file mode 100644 index 0000000000..5a5d63563a --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client_operation.go @@ -0,0 +1,41 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "context" + "net/http" +) + +// ClientOperation represents the context for a swagger operation to be submitted to the transport +type ClientOperation struct { + ID string + Method string + PathPattern string + ProducesMediaTypes []string + ConsumesMediaTypes []string + Schemes []string + AuthInfo ClientAuthInfoWriter + Params ClientRequestWriter + Reader ClientResponseReader + Context context.Context //nolint:containedctx // we precisely want this type to contain the request context + Client *http.Client +} + +// A ClientTransport implementor knows how to submit Request objects to some destination +type ClientTransport interface { + // Submit(string, RequestWriter, ResponseReader, AuthInfoWriter) (interface{}, error) + Submit(*ClientOperation) (interface{}, error) +} diff --git a/vendor/github.com/go-openapi/runtime/client_request.go b/vendor/github.com/go-openapi/runtime/client_request.go new file mode 100644 index 0000000000..4ebb2deabe --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client_request.go @@ -0,0 +1,152 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package runtime + +import ( + "io" + "net/http" + "net/url" + "time" + + "github.com/go-openapi/strfmt" +) + +// ClientRequestWriterFunc converts a function to a request writer interface +type ClientRequestWriterFunc func(ClientRequest, strfmt.Registry) error + +// WriteToRequest adds data to the request +func (fn ClientRequestWriterFunc) WriteToRequest(req ClientRequest, reg strfmt.Registry) error { + return fn(req, reg) +} + +// ClientRequestWriter is an interface for things that know how to write to a request +type ClientRequestWriter interface { + WriteToRequest(ClientRequest, strfmt.Registry) error +} + +// ClientRequest is an interface for things that know how to +// add information to a swagger client request. +type ClientRequest interface { //nolint:interfacebloat // a swagger-capable request is quite rich, hence the many getter/setters + SetHeaderParam(string, ...string) error + + GetHeaderParams() http.Header + + SetQueryParam(string, ...string) error + + SetFormParam(string, ...string) error + + SetPathParam(string, string) error + + GetQueryParams() url.Values + + SetFileParam(string, ...NamedReadCloser) error + + SetBodyParam(interface{}) error + + SetTimeout(time.Duration) error + + GetMethod() string + + GetPath() string + + GetBody() []byte + + GetBodyParam() interface{} + + GetFileParam() map[string][]NamedReadCloser +} + +// NamedReadCloser represents a named ReadCloser interface +type NamedReadCloser interface { + io.ReadCloser + Name() string +} + +// NamedReader creates a NamedReadCloser for use as file upload +func NamedReader(name string, rdr io.Reader) NamedReadCloser { + rc, ok := rdr.(io.ReadCloser) + if !ok { + rc = io.NopCloser(rdr) + } + return &namedReadCloser{ + name: name, + cr: rc, + } +} + +type namedReadCloser struct { + name string + cr io.ReadCloser +} + +func (n *namedReadCloser) Close() error { + return n.cr.Close() +} +func (n *namedReadCloser) Read(p []byte) (int, error) { + return n.cr.Read(p) +} +func (n *namedReadCloser) Name() string { + return n.name +} + +type TestClientRequest struct { + Headers http.Header + Body interface{} +} + +func (t *TestClientRequest) SetHeaderParam(name string, values ...string) error { + if t.Headers == nil { + t.Headers = make(http.Header) + } + t.Headers.Set(name, values[0]) + return nil +} + +func (t *TestClientRequest) SetQueryParam(_ string, _ ...string) error { return nil } + +func (t *TestClientRequest) SetFormParam(_ string, _ ...string) error { return nil } + +func (t *TestClientRequest) SetPathParam(_ string, _ string) error { return nil } + +func (t *TestClientRequest) SetFileParam(_ string, _ ...NamedReadCloser) error { return nil } + +func (t *TestClientRequest) SetBodyParam(body interface{}) error { + t.Body = body + return nil +} + +func (t *TestClientRequest) SetTimeout(time.Duration) error { + return nil +} + +func (t *TestClientRequest) GetQueryParams() url.Values { return nil } + +func (t *TestClientRequest) GetMethod() string { return "" } + +func (t *TestClientRequest) GetPath() string { return "" } + +func (t *TestClientRequest) GetBody() []byte { return nil } + +func (t *TestClientRequest) GetBodyParam() interface{} { + return t.Body +} + +func (t *TestClientRequest) GetFileParam() map[string][]NamedReadCloser { + return nil +} + +func (t *TestClientRequest) GetHeaderParams() http.Header { + return t.Headers +} diff --git a/vendor/github.com/go-openapi/runtime/client_response.go b/vendor/github.com/go-openapi/runtime/client_response.go new file mode 100644 index 0000000000..0d1691149d --- 
/dev/null +++ b/vendor/github.com/go-openapi/runtime/client_response.go @@ -0,0 +1,110 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "encoding/json" + "fmt" + "io" +) + +// A ClientResponse represents a client response +// This bridges between responses obtained from different transports +type ClientResponse interface { + Code() int + Message() string + GetHeader(string) string + GetHeaders(string) []string + Body() io.ReadCloser +} + +// A ClientResponseReaderFunc turns a function into a ClientResponseReader interface implementation +type ClientResponseReaderFunc func(ClientResponse, Consumer) (interface{}, error) + +// ReadResponse reads the response +func (read ClientResponseReaderFunc) ReadResponse(resp ClientResponse, consumer Consumer) (interface{}, error) { + return read(resp, consumer) +} + +// A ClientResponseReader is an interface for things that want to read a response. +// An application of this is to create structs from response values +type ClientResponseReader interface { + ReadResponse(ClientResponse, Consumer) (interface{}, error) +} + +// NewAPIError creates a new API error +func NewAPIError(opName string, payload interface{}, code int) *APIError { + return &APIError{ + OperationName: opName, + Response: payload, + Code: code, + } +} + +// APIError wraps an error model and captures the status code +type APIError struct { + OperationName string + Response interface{} + Code int +} + +func (o *APIError) Error() string { + var resp []byte + if err, ok := o.Response.(error); ok { + resp = []byte("'" + err.Error() + "'") + } else { + resp, _ = json.Marshal(o.Response) + } + return fmt.Sprintf("%s (status %d): %s", o.OperationName, o.Code, resp) +} + +func (o *APIError) String() string { + return o.Error() +} + +// IsSuccess returns true when this response returns a 2xx status code +func (o *APIError) IsSuccess() bool { + return o.Code/100 == 2 +} + +// IsRedirect returns true when this response returns a 3xx status code +func (o *APIError) IsRedirect() bool { + return o.Code/100 == 3 +} + +// IsClientError returns true when this response returns a 4xx status code +func (o *APIError) IsClientError() bool { + return o.Code/100 == 4 +} + +// IsServerError returns true when this response returns a 5xx status code +func (o *APIError) IsServerError() bool { + return o.Code/100 == 5 +} + +// IsCode returns true when this response returns the given status code +func (o *APIError) IsCode(code int) bool { + return o.Code == code +} + +// A ClientResponseStatus is a common interface implemented by all responses on the generated code +// You can use this to treat any client response based on status code +type ClientResponseStatus interface { + IsSuccess() bool + IsRedirect() bool + IsClientError() bool + IsServerError() bool + IsCode(int) bool +} diff --git a/vendor/github.com/go-openapi/runtime/constants.go
b/vendor/github.com/go-openapi/runtime/constants.go new file mode 100644 index 0000000000..515969242c --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/constants.go @@ -0,0 +1,49 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +const ( + // HeaderContentType represents an HTTP content-type header; its value is supposed to be a mime type + HeaderContentType = "Content-Type" + + // HeaderTransferEncoding represents an HTTP transfer-encoding header. + HeaderTransferEncoding = "Transfer-Encoding" + + // HeaderAccept the Accept header + HeaderAccept = "Accept" + // HeaderAuthorization the Authorization header + HeaderAuthorization = "Authorization" + + charsetKey = "charset" + + // DefaultMime the default fallback mime type + DefaultMime = "application/octet-stream" + // JSONMime the json mime type + JSONMime = "application/json" + // YAMLMime the yaml mime type + YAMLMime = "application/x-yaml" + // XMLMime the xml mime type + XMLMime = "application/xml" + // TextMime the text mime type + TextMime = "text/plain" + // HTMLMime the html mime type + HTMLMime = "text/html" + // CSVMime the csv mime type + CSVMime = "text/csv" + // MultipartFormMime the multipart form mime type + MultipartFormMime = "multipart/form-data" + // URLencodedFormMime the url encoded form mime type + URLencodedFormMime = "application/x-www-form-urlencoded" +) diff --git a/vendor/github.com/go-openapi/runtime/csv.go b/vendor/github.com/go-openapi/runtime/csv.go new file mode 100644 index 0000000000..c9597bcd6e --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/csv.go @@ -0,0 +1,364 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "bytes" + "context" + "encoding" + "encoding/csv" + "errors" + "fmt" + "io" + "reflect" + + "golang.org/x/sync/errgroup" +) + +// CSVConsumer creates a new CSV consumer. +// +// The consumer consumes CSV records from a provided reader into the data passed by reference. +// +// CSVOpt options may be specified to alter the default CSV behavior on the reader and the writer side (e.g. separator, skip header, ...). +// The defaults are those of the standard library's csv.Reader and csv.Writer.
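+// +// A minimal consumption sketch (the CSV payload and field names here are hypothetical), +// reading records into one of the supported destinations listed below: +// +//	var records [][]string +//	err := CSVConsumer().Consume(strings.NewReader("name,age\nrex,2\n"), &records) +//	// on success: records == [][]string{{"name", "age"}, {"rex", "2"}}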
+// +// Supported output underlying types and interfaces, prioritized in this order: +// - *csv.Writer +// - CSVWriter (writer options are ignored) +// - io.Writer (as raw bytes) +// - io.ReaderFrom (as raw bytes) +// - encoding.BinaryUnmarshaler (as raw bytes) +// - *[][]string (as a collection of records) +// - *[]byte (as raw bytes) +// - *string (as raw bytes) +// +// The consumer prioritizes situations where buffering the input is not required. +func CSVConsumer(opts ...CSVOpt) Consumer { + o := csvOptsWithDefaults(opts) + + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + if reader == nil { + return errors.New("CSVConsumer requires a reader") + } + if data == nil { + return errors.New("nil destination for CSVConsumer") + } + + csvReader := csv.NewReader(reader) + o.applyToReader(csvReader) + closer := defaultCloser + if o.closeStream { + if cl, isReaderCloser := reader.(io.Closer); isReaderCloser { + closer = cl.Close + } + } + defer func() { + _ = closer() + }() + + switch destination := data.(type) { + case *csv.Writer: + csvWriter := destination + o.applyToWriter(csvWriter) + + return pipeCSV(csvWriter, csvReader, o) + + case CSVWriter: + csvWriter := destination + // no writer options available + + return pipeCSV(csvWriter, csvReader, o) + + case io.Writer: + csvWriter := csv.NewWriter(destination) + o.applyToWriter(csvWriter) + + return pipeCSV(csvWriter, csvReader, o) + + case io.ReaderFrom: + var buf bytes.Buffer + csvWriter := csv.NewWriter(&buf) + o.applyToWriter(csvWriter) + if err := bufferedCSV(csvWriter, csvReader, o); err != nil { + return err + } + _, err := destination.ReadFrom(&buf) + + return err + + case encoding.BinaryUnmarshaler: + var buf bytes.Buffer + csvWriter := csv.NewWriter(&buf) + o.applyToWriter(csvWriter) + if err := bufferedCSV(csvWriter, csvReader, o); err != nil { + return err + } + + return destination.UnmarshalBinary(buf.Bytes()) + + default: + // support *[][]string, *[]byte, *string + if ptr := reflect.TypeOf(data); ptr.Kind() != reflect.Ptr { + return errors.New("destination must be a pointer") + } + + v := reflect.Indirect(reflect.ValueOf(data)) + t := v.Type() + + switch { + case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Slice && t.Elem().Elem().Kind() == reflect.String: + csvWriter := &csvRecordsWriter{} + // writer options are ignored + if err := pipeCSV(csvWriter, csvReader, o); err != nil { + return err + } + + v.Grow(len(csvWriter.records)) + v.SetCap(len(csvWriter.records)) // in case Grow was unnecessary, trim down the capacity + v.SetLen(len(csvWriter.records)) + reflect.Copy(v, reflect.ValueOf(csvWriter.records)) + + return nil + + case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8: + var buf bytes.Buffer + csvWriter := csv.NewWriter(&buf) + o.applyToWriter(csvWriter) + if err := bufferedCSV(csvWriter, csvReader, o); err != nil { + return err + } + v.SetBytes(buf.Bytes()) + + return nil + + case t.Kind() == reflect.String: + var buf bytes.Buffer + csvWriter := csv.NewWriter(&buf) + o.applyToWriter(csvWriter) + if err := bufferedCSV(csvWriter, csvReader, o); err != nil { + return err + } + v.SetString(buf.String()) + + return nil + + default: + return fmt.Errorf("%v (%T) is not supported by the CSVConsumer, %s", + data, data, "can be resolved by supporting CSVWriter/Writer/BinaryUnmarshaler interface", + ) + } + } + }) +} + +// CSVProducer creates a new CSV producer. +// +// The producer takes input data, then writes it as CSV to an output writer (essentially as a pipe).
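+// +// A minimal production sketch (the records here are hypothetical), +// writing one of the supported inputs listed below as CSV: +// +//	var buf bytes.Buffer +//	err := CSVProducer().Produce(&buf, [][]string{{"name", "age"}, {"rex", "2"}}) +//	// on success: buf.String() == "name,age\nrex,2\n"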
+// +// Supported input underlying types and interfaces, prioritized in this order: +// - *csv.Reader +// - CSVReader (reader options are ignored) +// - io.Reader +// - io.WriterTo +// - encoding.BinaryMarshaler +// - [][]string +// - []byte +// - string +// +// The producer prioritizes situations where buffering the input is not required. +func CSVProducer(opts ...CSVOpt) Producer { + o := csvOptsWithDefaults(opts) + + return ProducerFunc(func(writer io.Writer, data interface{}) error { + if writer == nil { + return errors.New("CSVProducer requires a writer") + } + if data == nil { + return errors.New("nil data for CSVProducer") + } + + csvWriter := csv.NewWriter(writer) + o.applyToWriter(csvWriter) + closer := defaultCloser + if o.closeStream { + if cl, isWriterCloser := writer.(io.Closer); isWriterCloser { + closer = cl.Close + } + } + defer func() { + _ = closer() + }() + + if rc, isDataCloser := data.(io.ReadCloser); isDataCloser { + defer rc.Close() + } + + switch origin := data.(type) { + case *csv.Reader: + csvReader := origin + o.applyToReader(csvReader) + + return pipeCSV(csvWriter, csvReader, o) + + case CSVReader: + csvReader := origin + // no reader options available + + return pipeCSV(csvWriter, csvReader, o) + + case io.Reader: + csvReader := csv.NewReader(origin) + o.applyToReader(csvReader) + + return pipeCSV(csvWriter, csvReader, o) + + case io.WriterTo: + // async piping of the writes performed by WriteTo + r, w := io.Pipe() + csvReader := csv.NewReader(r) + o.applyToReader(csvReader) + + pipe, _ := errgroup.WithContext(context.Background()) + pipe.Go(func() error { + _, err := origin.WriteTo(w) + _ = w.Close() + return err + }) + + pipe.Go(func() error { + defer func() { + _ = r.Close() + }() + + return pipeCSV(csvWriter, csvReader, o) + }) + + return pipe.Wait() + + case encoding.BinaryMarshaler: + buf, err := origin.MarshalBinary() + if err != nil { + return err + } + rdr := bytes.NewBuffer(buf) + csvReader := csv.NewReader(rdr) + + return bufferedCSV(csvWriter, csvReader, o) + + default: + // support [][]string, []byte, string (or pointers to those) + v := reflect.Indirect(reflect.ValueOf(data)) + t := v.Type() + + switch { + case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Slice && t.Elem().Elem().Kind() == reflect.String: + csvReader := &csvRecordsWriter{ + records: make([][]string, v.Len()), + } + reflect.Copy(reflect.ValueOf(csvReader.records), v) + + return pipeCSV(csvWriter, csvReader, o) + + case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8: + buf := bytes.NewBuffer(v.Bytes()) + csvReader := csv.NewReader(buf) + o.applyToReader(csvReader) + + return bufferedCSV(csvWriter, csvReader, o) + + case t.Kind() == reflect.String: + buf := bytes.NewBufferString(v.String()) + csvReader := csv.NewReader(buf) + o.applyToReader(csvReader) + + return bufferedCSV(csvWriter, csvReader, o) + + default: + return fmt.Errorf("%v (%T) is not supported by the CSVProducer, %s", + data, data, "can be resolved by supporting CSVReader/Reader/BinaryMarshaler interface", + ) + } + } + }) +} + +// pipeCSV copies CSV records from a CSV reader to a CSV writer +func pipeCSV(csvWriter CSVWriter, csvReader CSVReader, opts csvOpts) error { + for ; opts.skippedLines > 0; opts.skippedLines-- { + _, err := csvReader.Read() + if err != nil { + if errors.Is(err, io.EOF) { + return nil + } + + return err + } + } + + for { + record, err := csvReader.Read() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + + return err + } + + if err := csvWriter.Write(record); 
err != nil { + return err + } + } + + csvWriter.Flush() + + return csvWriter.Error() +} + +// bufferedCSV copies CSV records from a CSV reader to a CSV writer, +// by first reading all records then writing them at once. +func bufferedCSV(csvWriter *csv.Writer, csvReader *csv.Reader, opts csvOpts) error { + for ; opts.skippedLines > 0; opts.skippedLines-- { + _, err := csvReader.Read() + if err != nil { + if errors.Is(err, io.EOF) { + return nil + } + + return err + } + } + + records, err := csvReader.ReadAll() + if err != nil { + return err + } + + return csvWriter.WriteAll(records) +} diff --git a/vendor/github.com/go-openapi/runtime/csv_options.go b/vendor/github.com/go-openapi/runtime/csv_options.go new file mode 100644 index 0000000000..c16464c578 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/csv_options.go @@ -0,0 +1,124 @@ +package runtime + +import ( + "encoding/csv" + "io" +) + +// CSVOpt alters the behavior of the CSV consumer or producer. +type CSVOpt func(*csvOpts) + +type csvOpts struct { + csvReader csv.Reader + csvWriter csv.Writer + skippedLines int + closeStream bool +} + +// WithCSVReaderOpts specifies the options to csv.Reader +// when reading CSV. +func WithCSVReaderOpts(reader csv.Reader) CSVOpt { + return func(o *csvOpts) { + o.csvReader = reader + } +} + +// WithCSVWriterOpts specifies the options to csv.Writer +// when writing CSV. +func WithCSVWriterOpts(writer csv.Writer) CSVOpt { + return func(o *csvOpts) { + o.csvWriter = writer + } +} + +// WithCSVSkipLines will skip header lines. +func WithCSVSkipLines(skipped int) CSVOpt { + return func(o *csvOpts) { + o.skippedLines = skipped + } +} + +// WithCSVClosesStream makes the consumer (resp. producer) close the underlying reader (resp. writer) when done. +func WithCSVClosesStream() CSVOpt { + return func(o *csvOpts) { + o.closeStream = true + } +} + +func (o csvOpts) applyToReader(in *csv.Reader) { + if o.csvReader.Comma != 0 { + in.Comma = o.csvReader.Comma + } + if o.csvReader.Comment != 0 { + in.Comment = o.csvReader.Comment + } + if o.csvReader.FieldsPerRecord != 0 { + in.FieldsPerRecord = o.csvReader.FieldsPerRecord + } + + in.LazyQuotes = o.csvReader.LazyQuotes + in.TrimLeadingSpace = o.csvReader.TrimLeadingSpace + in.ReuseRecord = o.csvReader.ReuseRecord +} + +func (o csvOpts) applyToWriter(in *csv.Writer) { + if o.csvWriter.Comma != 0 { + in.Comma = o.csvWriter.Comma + } + in.UseCRLF = o.csvWriter.UseCRLF +} + +func csvOptsWithDefaults(opts []CSVOpt) csvOpts { + var o csvOpts + for _, apply := range opts { + apply(&o) + } + + return o +} + +// CSVWriter is the minimal interface of csv.Writer needed to write CSV records. +type CSVWriter interface { + Write([]string) error + Flush() + Error() error +} + +// CSVReader is the minimal interface of csv.Reader needed to read CSV records. +type CSVReader interface { + Read() ([]string, error) +} + +var ( + _ CSVWriter = &csvRecordsWriter{} + _ CSVReader = &csvRecordsWriter{} +) + +// csvRecordsWriter is an internal container to move CSV records back and forth +type csvRecordsWriter struct { + i int + records [][]string +} + +func (w *csvRecordsWriter) Write(record []string) error { + w.records = append(w.records, record) + + return nil +} + +func (w *csvRecordsWriter) Read() ([]string, error) { + if w.i >= len(w.records) { + return nil, io.EOF + } + defer func() { + w.i++ + }() + + return w.records[w.i], nil +} + +func (w *csvRecordsWriter) Flush() {} + +func (w *csvRecordsWriter) Error() error { + return nil +} diff --git a/vendor/github.com/go-openapi/runtime/discard.go b/vendor/github.com/go-openapi/runtime/discard.go new file mode 100644 index 0000000000..0d390cfd64 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/discard.go @@ -0,0 +1,9 @@ +package runtime + +import "io" + +// DiscardConsumer does absolutely
nothing, it's a black hole. +var DiscardConsumer = ConsumerFunc(func(_ io.Reader, _ interface{}) error { return nil }) + +// DiscardProducer does absolutely nothing, it's a black hole. +var DiscardProducer = ProducerFunc(func(_ io.Writer, _ interface{}) error { return nil }) diff --git a/vendor/github.com/go-openapi/runtime/file.go b/vendor/github.com/go-openapi/runtime/file.go new file mode 100644 index 0000000000..397d8a4593 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/file.go @@ -0,0 +1,19 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import "github.com/go-openapi/swag" + +type File = swag.File diff --git a/vendor/github.com/go-openapi/runtime/headers.go b/vendor/github.com/go-openapi/runtime/headers.go new file mode 100644 index 0000000000..4d111db4fe --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/headers.go @@ -0,0 +1,45 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "mime" + "net/http" + + "github.com/go-openapi/errors" +) + +// ContentType parses a content type header +func ContentType(headers http.Header) (string, string, error) { + ct := headers.Get(HeaderContentType) + orig := ct + if ct == "" { + ct = DefaultMime + } + if ct == "" { + return "", "", nil + } + + mt, opts, err := mime.ParseMediaType(ct) + if err != nil { + return "", "", errors.NewParseError(HeaderContentType, "header", orig, err) + } + + if cs, ok := opts[charsetKey]; ok { + return mt, cs, nil + } + + return mt, "", nil +} diff --git a/vendor/github.com/go-openapi/runtime/interfaces.go b/vendor/github.com/go-openapi/runtime/interfaces.go new file mode 100644 index 0000000000..e334128683 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/interfaces.go @@ -0,0 +1,112 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package runtime + +import ( + "context" + "io" + "net/http" + + "github.com/go-openapi/strfmt" +) + +// OperationHandlerFunc an adapter for a function to the OperationHandler interface +type OperationHandlerFunc func(interface{}) (interface{}, error) + +// Handle implements the operation handler interface +func (s OperationHandlerFunc) Handle(data interface{}) (interface{}, error) { + return s(data) +} + +// OperationHandler a handler for a swagger operation +type OperationHandler interface { + Handle(interface{}) (interface{}, error) +} + +// ConsumerFunc represents a function that can be used as a consumer +type ConsumerFunc func(io.Reader, interface{}) error + +// Consume consumes the reader into the data parameter +func (fn ConsumerFunc) Consume(reader io.Reader, data interface{}) error { + return fn(reader, data) +} + +// Consumer implementations know how to bind the values on the provided interface to +// data provided by the request body +type Consumer interface { + // Consume performs the binding of request values + Consume(io.Reader, interface{}) error +} + +// ProducerFunc represents a function that can be used as a producer +type ProducerFunc func(io.Writer, interface{}) error + +// Produce produces the response for the provided data +func (f ProducerFunc) Produce(writer io.Writer, data interface{}) error { + return f(writer, data) +} + +// Producer implementations know how to turn the provided interface into a valid +// HTTP response +type Producer interface { + // Produce writes to the http response + Produce(io.Writer, interface{}) error +} + +// AuthenticatorFunc turns a function into an authenticator +type AuthenticatorFunc func(interface{}) (bool, interface{}, error) + +// Authenticate authenticates the request with the provided data +func (f AuthenticatorFunc) Authenticate(params interface{}) (bool, interface{}, error) { + return f(params) +} + +// Authenticator represents an authentication strategy +// implementations of Authenticator know how to authenticate the +// request data and translate that into a valid principal object or an error +type Authenticator interface { + Authenticate(interface{}) (bool, interface{}, error) +} + +// AuthorizerFunc turns a function into an authorizer +type AuthorizerFunc func(*http.Request, interface{}) error + +// Authorize authorizes the processing of the request for the principal +func (f AuthorizerFunc) Authorize(r *http.Request, principal interface{}) error { + return f(r, principal) +} + +// Authorizer represents an authorization strategy +// implementations of Authorizer know how to authorize the principal object +// using the request data and returns error if unauthorized +type Authorizer interface { + Authorize(*http.Request, interface{}) error +} + +// Validatable types implementing this interface allow customizing their validation +// this will be used instead of the reflective validation based on the spec document. +// the implementations are assumed to have been generated by the swagger tool so they should +// contain all the validations obtained from the spec +type Validatable interface { + Validate(strfmt.Registry) error +} + +// ContextValidatable types implementing this interface allow customizing their validation +// this will be used instead of the reflective validation based on the spec document. 
+// the implementations are assumed to have been generated by the swagger tool so they should +// contain all the context validations obtained from the spec +type ContextValidatable interface { + ContextValidate(context.Context, strfmt.Registry) error +} diff --git a/vendor/github.com/go-openapi/runtime/json.go b/vendor/github.com/go-openapi/runtime/json.go new file mode 100644 index 0000000000..5a690559cc --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/json.go @@ -0,0 +1,38 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "encoding/json" + "io" +) + +// JSONConsumer creates a new JSON consumer +func JSONConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + dec := json.NewDecoder(reader) + dec.UseNumber() // preserve number formats + return dec.Decode(data) + }) +} + +// JSONProducer creates a new JSON producer +func JSONProducer() Producer { + return ProducerFunc(func(writer io.Writer, data interface{}) error { + enc := json.NewEncoder(writer) + enc.SetEscapeHTML(false) + return enc.Encode(data) + }) +} diff --git a/vendor/github.com/go-openapi/runtime/request.go b/vendor/github.com/go-openapi/runtime/request.go new file mode 100644 index 0000000000..9e3e1ecb14 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/request.go @@ -0,0 +1,149 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package runtime + +import ( + "bufio" + "context" + "errors" + "io" + "net/http" + "strings" + + "github.com/go-openapi/swag" +) + +// CanHaveBody returns true if this method can have a body +func CanHaveBody(method string) bool { + mn := strings.ToUpper(method) + return mn == "POST" || mn == "PUT" || mn == "PATCH" || mn == "DELETE" +} + +// IsSafe returns true if this is a request with a safe method +func IsSafe(r *http.Request) bool { + mn := strings.ToUpper(r.Method) + return mn == "GET" || mn == "HEAD" +} + +// AllowsBody returns true if the request allows for a body +func AllowsBody(r *http.Request) bool { + mn := strings.ToUpper(r.Method) + return mn != "HEAD" +} + +// HasBody returns true if this request has a body +func HasBody(r *http.Request) bool { + // happy case: we have a content length set + if r.ContentLength > 0 { + return true + } + + if r.Header.Get("content-length") != "" { + // a content-length header is set but ContentLength was not positive above: + // it was explicitly set to 0, so we assume no body (no Transfer-Encoding should be present in this case) + return false + } + + rdr := newPeekingReader(r.Body) + r.Body = rdr + return rdr.HasContent() +} + +func newPeekingReader(r io.ReadCloser) *peekingReader { + if r == nil { + return nil + } + return &peekingReader{ + underlying: bufio.NewReader(r), + orig: r, + } +} + +type peekingReader struct { + underlying interface { + Buffered() int + Peek(int) ([]byte, error) + Read([]byte) (int, error) + } + orig io.ReadCloser +} + +func (p *peekingReader) HasContent() bool { + if p == nil { + return false + } + if p.underlying.Buffered() > 0 { + return true + } + b, err := p.underlying.Peek(1) + if err != nil { + return false + } + return len(b) > 0 +} + +func (p *peekingReader) Read(d []byte) (int, error) { + if p == nil { + return 0, io.EOF + } + if p.underlying == nil { + return 0, io.ErrUnexpectedEOF + } + return p.underlying.Read(d) +} + +func (p *peekingReader) Close() error { + if p.underlying == nil { + return errors.New("reader already closed") + } + p.underlying = nil + if p.orig != nil { + return p.orig.Close() + } + return nil +} + +// JSONRequest creates a new http request with json headers set. +// +// It uses context.Background.
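+// +// A minimal usage sketch (the URL and payload here are hypothetical): +// +//	req, err := JSONRequest(http.MethodPost, "https://example.com/api/items", strings.NewReader(`{"name":"rex"}`)) +//	// on success, req carries Content-Type: application/json and Accept: application/json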
+func JSONRequest(method, urlStr string, body io.Reader) (*http.Request, error) { + req, err := http.NewRequestWithContext(context.Background(), method, urlStr, body) + if err != nil { + return nil, err + } + req.Header.Add(HeaderContentType, JSONMime) + req.Header.Add(HeaderAccept, JSONMime) + return req, nil +} + +// Gettable for things with a method GetOK(string) (data []string, hasKey bool, hasValue bool) +type Gettable interface { + GetOK(string) ([]string, bool, bool) +} + +// ReadSingleValue reads a single value (the last one when several are present) from the source +func ReadSingleValue(values Gettable, name string) string { + vv, _, hv := values.GetOK(name) + if hv { + return vv[len(vv)-1] + } + return "" +} + +// ReadCollectionValue reads a collection value from a string data source +func ReadCollectionValue(values Gettable, name, collectionFormat string) []string { + v := ReadSingleValue(values, name) + return swag.SplitByFormat(v, collectionFormat) +} diff --git a/vendor/github.com/go-openapi/runtime/statuses.go b/vendor/github.com/go-openapi/runtime/statuses.go new file mode 100644 index 0000000000..3b011a0bff --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/statuses.go @@ -0,0 +1,90 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package runtime + +// Statuses maps the most common HTTP status codes to a default message, +// taken from https://httpstatuses.com/ +var Statuses = map[int]string{ + 100: "Continue", + 101: "Switching Protocols", + 102: "Processing", + 103: "Checkpoint", + 122: "URI too long", + 200: "OK", + 201: "Created", + 202: "Accepted", + 203: "Request Processed", + 204: "No Content", + 205: "Reset Content", + 206: "Partial Content", + 207: "Multi-Status", + 208: "Already Reported", + 226: "IM Used", + 300: "Multiple Choices", + 301: "Moved Permanently", + 302: "Found", + 303: "See Other", + 304: "Not Modified", + 305: "Use Proxy", + 306: "Switch Proxy", + 307: "Temporary Redirect", + 308: "Permanent Redirect", + 400: "Bad Request", + 401: "Unauthorized", + 402: "Payment Required", + 403: "Forbidden", + 404: "Not Found", + 405: "Method Not Allowed", + 406: "Not Acceptable", + 407: "Proxy Authentication Required", + 408: "Request Timeout", + 409: "Conflict", + 410: "Gone", + 411: "Length Required", + 412: "Precondition Failed", + 413: "Request Entity Too Large", + 414: "Request-URI Too Long", + 415: "Unsupported Media Type", + 416: "Request Range Not Satisfiable", + 417: "Expectation Failed", + 418: "I'm a teapot", + 420: "Enhance Your Calm", + 422: "Unprocessable Entity", + 423: "Locked", + 424: "Failed Dependency", + 426: "Upgrade Required", + 428: "Precondition Required", + 429: "Too Many Requests", + 431: "Request Header Fields Too Large", + 444: "No Response", + 449: "Retry With", + 450: "Blocked by Windows Parental Controls", + 451: "Wrong Exchange Server", + 499: "Client Closed Request", + 500: "Internal Server Error", + 501: "Not Implemented", + 502: "Bad Gateway", + 503: "Service Unavailable", + 504: "Gateway Timeout", + 505: "HTTP Version Not Supported", + 506: "Variant Also Negotiates", + 507: "Insufficient Storage", + 508: "Loop Detected", + 509: "Bandwidth Limit Exceeded", + 510: "Not Extended", + 511: "Network Authentication Required", + 598: "Network read timeout error", + 599: "Network connect timeout error", +} diff --git a/vendor/github.com/go-openapi/runtime/text.go b/vendor/github.com/go-openapi/runtime/text.go new file mode 100644 index 0000000000..f33320b7dd --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/text.go @@ -0,0 +1,116 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "io" + "reflect" + + "github.com/go-openapi/swag" +) + +// TextConsumer creates a new text consumer +func TextConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + if reader == nil { + return errors.New("TextConsumer requires a reader") // early exit + } + + buf := new(bytes.Buffer) + _, err := buf.ReadFrom(reader) + if err != nil { + return err + } + b := buf.Bytes() + + // If the buffer is empty, skip unmarshaling: unmarshaling an empty payload would panic.
+ if len(b) == 0 { + return nil + } + + if tu, ok := data.(encoding.TextUnmarshaler); ok { + err := tu.UnmarshalText(b) + if err != nil { + return fmt.Errorf("text consumer: %v", err) + } + + return nil + } + + t := reflect.TypeOf(data) + if data != nil && t.Kind() == reflect.Ptr { + v := reflect.Indirect(reflect.ValueOf(data)) + if t.Elem().Kind() == reflect.String { + v.SetString(string(b)) + return nil + } + } + + return fmt.Errorf("%v (%T) is not supported by the TextConsumer, %s", + data, data, "can be resolved by supporting TextUnmarshaler interface") + }) +} + +// TextProducer creates a new text producer +func TextProducer() Producer { + return ProducerFunc(func(writer io.Writer, data interface{}) error { + if writer == nil { + return errors.New("TextProducer requires a writer") // early exit + } + + if data == nil { + return errors.New("no data given to produce text from") + } + + if tm, ok := data.(encoding.TextMarshaler); ok { + txt, err := tm.MarshalText() + if err != nil { + return fmt.Errorf("text producer: %v", err) + } + _, err = writer.Write(txt) + return err + } + + if str, ok := data.(error); ok { + _, err := writer.Write([]byte(str.Error())) + return err + } + + if str, ok := data.(fmt.Stringer); ok { + _, err := writer.Write([]byte(str.String())) + return err + } + + v := reflect.Indirect(reflect.ValueOf(data)) + if t := v.Type(); t.Kind() == reflect.Struct || t.Kind() == reflect.Slice { + b, err := swag.WriteJSON(data) + if err != nil { + return err + } + _, err = writer.Write(b) + return err + } + if v.Kind() != reflect.String { + return fmt.Errorf("%T is not a supported type by the TextProducer", data) + } + + _, err := writer.Write([]byte(v.String())) + return err + }) +} diff --git a/vendor/github.com/go-openapi/runtime/values.go b/vendor/github.com/go-openapi/runtime/values.go new file mode 100644 index 0000000000..11f5732af4 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/values.go @@ -0,0 +1,19 @@ +package runtime + +// Values typically represent parameters on a http request. +type Values map[string][]string + +// GetOK returns the values collection for the given key. +// When the key is present in the map it will return true for hasKey. +// When the value is not empty it will return true for hasValue. +func (v Values) GetOK(key string) (value []string, hasKey bool, hasValue bool) { + value, hasKey = v[key] + if !hasKey { + return + } + if len(value) == 0 { + return + } + hasValue = true + return +} diff --git a/vendor/github.com/go-openapi/runtime/xml.go b/vendor/github.com/go-openapi/runtime/xml.go new file mode 100644 index 0000000000..821c7393df --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/xml.go @@ -0,0 +1,36 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package runtime + +import ( + "encoding/xml" + "io" +) + +// XMLConsumer creates a new XML consumer +func XMLConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + dec := xml.NewDecoder(reader) + return dec.Decode(data) + }) +} + +// XMLProducer creates a new XML producer +func XMLProducer() Producer { + return ProducerFunc(func(writer io.Writer, data interface{}) error { + enc := xml.NewEncoder(writer) + return enc.Encode(data) + }) +} diff --git a/vendor/github.com/go-openapi/spec/.editorconfig b/vendor/github.com/go-openapi/spec/.editorconfig new file mode 100644 index 0000000000..3152da69a5 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/spec/.gitignore b/vendor/github.com/go-openapi/spec/.gitignore new file mode 100644 index 0000000000..f47cb2045f --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.gitignore @@ -0,0 +1 @@ +*.out diff --git a/vendor/github.com/go-openapi/spec/.golangci.yml b/vendor/github.com/go-openapi/spec/.golangci.yml new file mode 100644 index 0000000000..22f8d21cca --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.golangci.yml @@ -0,0 +1,61 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 45 + maligned: + suggest-new: true + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + +linters: + enable-all: true + disable: + - maligned + - unparam + - lll + - gochecknoinits + - gochecknoglobals + - funlen + - godox + - gocognit + - whitespace + - wsl + - wrapcheck + - testpackage + - nlreturn + - gomnd + - exhaustivestruct + - goerr113 + - errorlint + - nestif + - godot + - gofumpt + - paralleltest + - tparallel + - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..9322b065e3 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/spec/LICENSE b/vendor/github.com/go-openapi/spec/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md new file mode 100644 index 0000000000..7fd2810c69 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/README.md @@ -0,0 +1,54 @@ +# OpenAPI v2 object model [![Build Status](https://github.com/go-openapi/spec/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/spec/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) + +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/spec.svg)](https://pkg.go.dev/github.com/go-openapi/spec) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/spec)](https://goreportcard.com/report/github.com/go-openapi/spec) + +The object model for OpenAPI specification documents. + +### FAQ + +* What does this do? + +> 1. This package knows how to marshal and unmarshal Swagger API specifications into a golang object model +> 2. It knows how to resolve $ref's and expand them to make a single root document + +* How does it play with the rest of the go-openapi packages? + +> 1. This package is at the core of the go-openapi suite of packages and [code generator](https://github.com/go-swagger/go-swagger) +> 2. There is a [spec loading package](https://github.com/go-openapi/loads) to fetch specs as JSON or YAML from local or remote locations +> 3. There is a [spec validation package](https://github.com/go-openapi/validate) built on top of it +> 4. There is a [spec analysis package](https://github.com/go-openapi/analysis) built on top of it, to analyze, flatten, fix and merge spec documents + +* Does this library support OpenAPI 3? + +> No. +> This package currently only supports OpenAPI 2.0 (aka Swagger 2.0). +> There is no plan to make it evolve toward supporting OpenAPI 3.x. +> This [discussion thread](https://github.com/go-openapi/spec/issues/21) tells the full story. +> +> An early attempt to support Swagger 3 may be found at: https://github.com/go-openapi/spec3 + +* Does the unmarshaling support YAML? + +> Not directly. The exposed types know only how to unmarshal from JSON. +> +> In order to load a YAML document as a Swagger spec, you need to use the loaders provided by +> github.com/go-openapi/loads +> +> Take a look at the example there: https://pkg.go.dev/github.com/go-openapi/loads#example-Spec +> +> See also https://github.com/go-openapi/spec/issues/164 + +* How can I validate a spec? + +> Validation is provided by [the validate package](http://github.com/go-openapi/validate) + +* Why do we have an `ID` field for `Schema` which is not part of the swagger spec? + +> We found jsonschema compatibility more important, since `id` in jsonschema influences +> how `$ref`'s are resolved. +> This `id` does not conflict with any property named `id`.
+> +> See also https://github.com/go-openapi/spec/issues/23 diff --git a/vendor/github.com/go-openapi/spec/cache.go b/vendor/github.com/go-openapi/spec/cache.go new file mode 100644 index 0000000000..122993b44b --- /dev/null +++ b/vendor/github.com/go-openapi/spec/cache.go @@ -0,0 +1,98 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "sync" +) + +// ResolutionCache a cache for resolving urls +type ResolutionCache interface { + Get(string) (interface{}, bool) + Set(string, interface{}) +} + +type simpleCache struct { + lock sync.RWMutex + store map[string]interface{} +} + +func (s *simpleCache) ShallowClone() ResolutionCache { + store := make(map[string]interface{}, len(s.store)) + s.lock.RLock() + for k, v := range s.store { + store[k] = v + } + s.lock.RUnlock() + + return &simpleCache{ + store: store, + } +} + +// Get retrieves a cached URI +func (s *simpleCache) Get(uri string) (interface{}, bool) { + s.lock.RLock() + v, ok := s.store[uri] + + s.lock.RUnlock() + return v, ok +} + +// Set caches a URI +func (s *simpleCache) Set(uri string, data interface{}) { + s.lock.Lock() + s.store[uri] = data + s.lock.Unlock() +} + +var ( + // resCache is a package level cache for $ref resolution and expansion. + // It is initialized lazily by methods that have the need for it: no + // memory is allocated unless some expander methods are called. + // + // It is initialized with JSON schema and swagger schema, + // which do not mutate during normal operations. + // + // All subsequent utilizations of this cache are produced from a shallow + // clone of this initial version. + resCache *simpleCache + onceCache sync.Once + + _ ResolutionCache = &simpleCache{} +) + +// initResolutionCache initializes the URI resolution cache. To be wrapped in a sync.Once.Do call. +func initResolutionCache() { + resCache = defaultResolutionCache() +} + +func defaultResolutionCache() *simpleCache { + return &simpleCache{store: map[string]interface{}{ + "http://swagger.io/v2/schema.json": MustLoadSwagger20Schema(), + "http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(), + }} +} + +func cacheOrDefault(cache ResolutionCache) ResolutionCache { + onceCache.Do(initResolutionCache) + + if cache != nil { + return cache + } + + // get a shallow clone of the base cache with swagger and json schema + return resCache.ShallowClone() +} diff --git a/vendor/github.com/go-openapi/spec/contact_info.go b/vendor/github.com/go-openapi/spec/contact_info.go new file mode 100644 index 0000000000..2f7bb219b5 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/contact_info.go @@ -0,0 +1,57 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag" +) + +// ContactInfo contact information for the exposed API. +// +// For more information: http://goo.gl/8us55a#contactObject +type ContactInfo struct { + ContactInfoProps + VendorExtensible +} + +// ContactInfoProps hold the properties of a ContactInfo object +type ContactInfoProps struct { + Name string `json:"name,omitempty"` + URL string `json:"url,omitempty"` + Email string `json:"email,omitempty"` +} + +// UnmarshalJSON hydrates ContactInfo from json +func (c *ContactInfo) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &c.ContactInfoProps); err != nil { + return err + } + return json.Unmarshal(data, &c.VendorExtensible) +} + +// MarshalJSON produces ContactInfo as json +func (c ContactInfo) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(c.ContactInfoProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(c.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} diff --git a/vendor/github.com/go-openapi/spec/debug.go b/vendor/github.com/go-openapi/spec/debug.go new file mode 100644 index 0000000000..fc889f6d0b --- /dev/null +++ b/vendor/github.com/go-openapi/spec/debug.go @@ -0,0 +1,49 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "fmt" + "log" + "os" + "path" + "runtime" +) + +// Debug is true when the SWAGGER_DEBUG env var is not empty. +// +// It enables a more verbose logging of this package. 
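+//
+// A minimal sketch (illustrative only, not part of the upstream source): since Debug is
+// an exported package variable, callers may also toggle it directly instead of setting
+// the environment variable before the program starts:
+//
+//	spec.Debug = true // same effect as launching with SWAGGER_DEBUG=1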
+var Debug = os.Getenv("SWAGGER_DEBUG") != "" + +var ( + // specLogger is a debug logger for this package + specLogger *log.Logger +) + +func init() { + debugOptions() +} + +func debugOptions() { + specLogger = log.New(os.Stdout, "spec:", log.LstdFlags) +} + +func debugLog(msg string, args ...interface{}) { + // A private, trivial trace logger, based on go-openapi/spec/expander.go:debugLog() + if Debug { + _, file1, pos1, _ := runtime.Caller(1) + specLogger.Printf("%s:%d: %s", path.Base(file1), pos1, fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/go-openapi/spec/embed.go b/vendor/github.com/go-openapi/spec/embed.go new file mode 100644 index 0000000000..1f4284750a --- /dev/null +++ b/vendor/github.com/go-openapi/spec/embed.go @@ -0,0 +1,17 @@ +package spec + +import ( + "embed" + "path" +) + +//go:embed schemas/*.json schemas/*/*.json +var assets embed.FS + +func jsonschemaDraft04JSONBytes() ([]byte, error) { + return assets.ReadFile(path.Join("schemas", "jsonschema-draft-04.json")) +} + +func v2SchemaJSONBytes() ([]byte, error) { + return assets.ReadFile(path.Join("schemas", "v2", "schema.json")) +} diff --git a/vendor/github.com/go-openapi/spec/errors.go b/vendor/github.com/go-openapi/spec/errors.go new file mode 100644 index 0000000000..6992c7ba73 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/errors.go @@ -0,0 +1,19 @@ +package spec + +import "errors" + +// Error codes +var ( + // ErrUnknownTypeForReference indicates that a resolved reference was found in an unsupported container type + ErrUnknownTypeForReference = errors.New("unknown type for the resolved reference") + + // ErrResolveRefNeedsAPointer indicates that a $ref target must be a valid JSON pointer + ErrResolveRefNeedsAPointer = errors.New("resolve ref: target needs to be a pointer") + + // ErrDerefUnsupportedType indicates that a resolved reference was found in an unsupported container type. + // At the moment, $ref are supported only inside: schemas, parameters, responses, path items + ErrDerefUnsupportedType = errors.New("deref: unsupported type") + + // ErrExpandUnsupportedType indicates that $ref expansion is attempted on some invalid type + ErrExpandUnsupportedType = errors.New("expand: unsupported type. Input should be of type *Parameter or *Response") +) diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go new file mode 100644 index 0000000000..b81a5699a0 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/expander.go @@ -0,0 +1,607 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" +) + +// ExpandOptions provides options for the spec expander. +// +// RelativeBase is the path to the root document. This can be a remote URL or a path to a local file. +// +// If left empty, the root document is assumed to be located in the current working directory: +// all relative $ref's will be resolved from there. 
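+//
+// A hedged caller-side sketch (illustrative only, not part of the upstream source;
+// "doc" is assumed to be a previously loaded *spec.Swagger):
+//
+//	opts := &spec.ExpandOptions{
+//		RelativeBase: "./swagger.json", // root document anchoring relative $ref's
+//		SkipSchemas:  false,            // also expand schema definitions
+//	}
+//	err := spec.ExpandSpec(doc, opts)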
+//
+// PathLoader injects a document loading method. By default, this resolves to the function provided by the PathLoader package variable.
+type ExpandOptions struct {
+	RelativeBase        string // the path to the root document to expand. This is a file, not a directory
+	SkipSchemas         bool   // do not expand schemas, just paths, parameters and responses
+	ContinueOnError     bool   // continue expanding even after an error is found
+	PathLoader          func(string) (json.RawMessage, error) `json:"-"` // the document loading method that takes a path as input and yields a json document
+	AbsoluteCircularRef bool // circular $ref remaining after expansion remain absolute URLs
+}
+
+func optionsOrDefault(opts *ExpandOptions) *ExpandOptions {
+	if opts != nil {
+		clone := *opts // shallow clone to avoid internal changes from being propagated to the caller
+		if clone.RelativeBase != "" {
+			clone.RelativeBase = normalizeBase(clone.RelativeBase)
+		}
+		// if the relative base is empty, let the schema loader choose a pseudo root document
+		return &clone
+	}
+	return &ExpandOptions{}
+}
+
+// ExpandSpec expands the references in a swagger spec
+func ExpandSpec(spec *Swagger, options *ExpandOptions) error {
+	options = optionsOrDefault(options)
+	resolver := defaultSchemaLoader(spec, options, nil, nil)
+
+	specBasePath := options.RelativeBase
+
+	if !options.SkipSchemas {
+		for key, definition := range spec.Definitions {
+			parentRefs := make([]string, 0, 10)
+			parentRefs = append(parentRefs, "#/definitions/"+key)
+
+			def, err := expandSchema(definition, parentRefs, resolver, specBasePath)
+			if resolver.shouldStopOnError(err) {
+				return err
+			}
+			if def != nil {
+				spec.Definitions[key] = *def
+			}
+		}
+	}
+
+	for key := range spec.Parameters {
+		parameter := spec.Parameters[key]
+		if err := expandParameterOrResponse(&parameter, resolver, specBasePath); resolver.shouldStopOnError(err) {
+			return err
+		}
+		spec.Parameters[key] = parameter
+	}
+
+	for key := range spec.Responses {
+		response := spec.Responses[key]
+		if err := expandParameterOrResponse(&response, resolver, specBasePath); resolver.shouldStopOnError(err) {
+			return err
+		}
+		spec.Responses[key] = response
+	}
+
+	if spec.Paths != nil {
+		for key := range spec.Paths.Paths {
+			pth := spec.Paths.Paths[key]
+			if err := expandPathItem(&pth, resolver, specBasePath); resolver.shouldStopOnError(err) {
+				return err
+			}
+			spec.Paths.Paths[key] = pth
+		}
+	}
+
+	return nil
+}
+
+const rootBase = ".root"
+
+// baseForRoot loads in the cache the root document and produces a fake ".root" base path entry
+// for further $ref resolution
+func baseForRoot(root interface{}, cache ResolutionCache) string {
+	// cache the root document to resolve $ref's
+	normalizedBase := normalizeBase(rootBase)
+
+	if root == nil {
+		// ensure that we never leave a nil root: always cache the root base pseudo-document
+		cachedRoot, found := cache.Get(normalizedBase)
+		if found && cachedRoot != nil {
+			// the cache is already preloaded with a root
+			return normalizedBase
+		}
+
+		root = map[string]interface{}{}
+	}
+
+	cache.Set(normalizedBase, root)
+
+	return normalizedBase
+}
+
+// ExpandSchema expands the refs in the schema object with reference to the root object.
+//
+// go-openapi/validate uses this function.
+//
+// Notice that it is impossible to reference a json schema in a different document other than root
+// (use ExpandSchemaWithBasePath to resolve external references).
+//
+// Setting the cache is optional and this parameter may safely be left to nil.
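+//
+// A minimal sketch (illustrative only; rawSchema is a hypothetical []byte holding a JSON schema):
+//
+//	var sch spec.Schema
+//	if err := json.Unmarshal(rawSchema, &sch); err != nil {
+//		return err
+//	}
+//	// nil root: the schema itself is used as root; nil cache: a default cache is cloned
+//	if err := spec.ExpandSchema(&sch, nil, nil); err != nil {
+//		return err
+//	}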
+func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error { + cache = cacheOrDefault(cache) + if root == nil { + root = schema + } + + opts := &ExpandOptions{ + // when a root is specified, cache the root as an in-memory document for $ref retrieval + RelativeBase: baseForRoot(root, cache), + SkipSchemas: false, + ContinueOnError: false, + } + + return ExpandSchemaWithBasePath(schema, cache, opts) +} + +// ExpandSchemaWithBasePath expands the refs in the schema object, base path configured through expand options. +// +// Setting the cache is optional and this parameter may safely be left to nil. +func ExpandSchemaWithBasePath(schema *Schema, cache ResolutionCache, opts *ExpandOptions) error { + if schema == nil { + return nil + } + + cache = cacheOrDefault(cache) + + opts = optionsOrDefault(opts) + + resolver := defaultSchemaLoader(nil, opts, cache, nil) + + parentRefs := make([]string, 0, 10) + s, err := expandSchema(*schema, parentRefs, resolver, opts.RelativeBase) + if err != nil { + return err + } + if s != nil { + // guard for when continuing on error + *schema = *s + } + + return nil +} + +func expandItems(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { + if target.Items == nil { + return &target, nil + } + + // array + if target.Items.Schema != nil { + t, err := expandSchema(*target.Items.Schema, parentRefs, resolver, basePath) + if err != nil { + return nil, err + } + *target.Items.Schema = *t + } + + // tuple + for i := range target.Items.Schemas { + t, err := expandSchema(target.Items.Schemas[i], parentRefs, resolver, basePath) + if err != nil { + return nil, err + } + target.Items.Schemas[i] = *t + } + + return &target, nil +} + +func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { + if target.Ref.String() == "" && target.Ref.IsRoot() { + newRef := normalizeRef(&target.Ref, basePath) + target.Ref = *newRef + return &target, nil + } + + // change the base path of resolution when an ID is encountered + // otherwise the basePath should inherit the parent's + if target.ID != "" { + basePath, _ = resolver.setSchemaID(target, target.ID, basePath) + } + + if target.Ref.String() != "" { + if !resolver.options.SkipSchemas { + return expandSchemaRef(target, parentRefs, resolver, basePath) + } + + // when "expand" with SkipSchema, we just rebase the existing $ref without replacing + // the full schema. 
+ rebasedRef, err := NewRef(normalizeURI(target.Ref.String(), basePath)) + if err != nil { + return nil, err + } + target.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) + + return &target, nil + } + + for k := range target.Definitions { + tt, err := expandSchema(target.Definitions[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if tt != nil { + target.Definitions[k] = *tt + } + } + + t, err := expandItems(target, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target = *t + } + + for i := range target.AllOf { + t, err := expandSchema(target.AllOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.AllOf[i] = *t + } + } + + for i := range target.AnyOf { + t, err := expandSchema(target.AnyOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.AnyOf[i] = *t + } + } + + for i := range target.OneOf { + t, err := expandSchema(target.OneOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.OneOf[i] = *t + } + } + + if target.Not != nil { + t, err := expandSchema(*target.Not, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.Not = *t + } + } + + for k := range target.Properties { + t, err := expandSchema(target.Properties[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.Properties[k] = *t + } + } + + if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil { + t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.AdditionalProperties.Schema = *t + } + } + + for k := range target.PatternProperties { + t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.PatternProperties[k] = *t + } + } + + for k := range target.Dependencies { + if target.Dependencies[k].Schema != nil { + t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.Dependencies[k].Schema = *t + } + } + } + + if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil { + t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.AdditionalItems.Schema = *t + } + } + return &target, nil +} + +func expandSchemaRef(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { + // if a Ref is found, all sibling fields are skipped + // Ref also changes the resolution scope of children expandSchema + + // here the resolution scope is changed because a $ref was encountered + normalizedRef := normalizeRef(&target.Ref, basePath) + normalizedBasePath := normalizedRef.RemoteURI() + + if resolver.isCircular(normalizedRef, basePath, parentRefs...) { + // this means there is a cycle in the recursion tree: return the Ref + // - circular refs cannot be expanded. 
We leave them as ref.
+		// - denormalization means that a new local file ref is set relative to the original basePath
+		debugLog("short circuit circular ref: basePath: %s, normalizedPath: %s, normalized ref: %s",
+			basePath, normalizedBasePath, normalizedRef.String())
+		if !resolver.options.AbsoluteCircularRef {
+			target.Ref = denormalizeRef(normalizedRef, resolver.context.basePath, resolver.context.rootID)
+		} else {
+			target.Ref = *normalizedRef
+		}
+		return &target, nil
+	}
+
+	var t *Schema
+	err := resolver.Resolve(&target.Ref, &t, basePath)
+	if resolver.shouldStopOnError(err) {
+		return nil, err
+	}
+
+	if t == nil {
+		// guard for when continuing on error
+		return &target, nil
+	}
+
+	parentRefs = append(parentRefs, normalizedRef.String())
+	transitiveResolver := resolver.transitiveResolver(basePath, target.Ref)
+
+	basePath = resolver.updateBasePath(transitiveResolver, normalizedBasePath)
+
+	return expandSchema(*t, parentRefs, transitiveResolver, basePath)
+}
+
+func expandPathItem(pathItem *PathItem, resolver *schemaLoader, basePath string) error {
+	if pathItem == nil {
+		return nil
+	}
+
+	parentRefs := make([]string, 0, 10)
+	if err := resolver.deref(pathItem, parentRefs, basePath); resolver.shouldStopOnError(err) {
+		return err
+	}
+
+	if pathItem.Ref.String() != "" {
+		transitiveResolver := resolver.transitiveResolver(basePath, pathItem.Ref)
+		basePath = transitiveResolver.updateBasePath(resolver, basePath)
+		resolver = transitiveResolver
+	}
+
+	pathItem.Ref = Ref{}
+	for i := range pathItem.Parameters {
+		if err := expandParameterOrResponse(&(pathItem.Parameters[i]), resolver, basePath); resolver.shouldStopOnError(err) {
+			return err
+		}
+	}
+
+	ops := []*Operation{
+		pathItem.Get,
+		pathItem.Head,
+		pathItem.Options,
+		pathItem.Put,
+		pathItem.Post,
+		pathItem.Patch,
+		pathItem.Delete,
+	}
+	for _, op := range ops {
+		if err := expandOperation(op, resolver, basePath); resolver.shouldStopOnError(err) {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func expandOperation(op *Operation, resolver *schemaLoader, basePath string) error {
+	if op == nil {
+		return nil
+	}
+
+	for i := range op.Parameters {
+		param := op.Parameters[i]
+		if err := expandParameterOrResponse(&param, resolver, basePath); resolver.shouldStopOnError(err) {
+			return err
+		}
+		op.Parameters[i] = param
+	}
+
+	if op.Responses == nil {
+		return nil
+	}
+
+	responses := op.Responses
+	if err := expandParameterOrResponse(responses.Default, resolver, basePath); resolver.shouldStopOnError(err) {
+		return err
+	}
+
+	for code := range responses.StatusCodeResponses {
+		response := responses.StatusCodeResponses[code]
+		if err := expandParameterOrResponse(&response, resolver, basePath); resolver.shouldStopOnError(err) {
+			return err
+		}
+		responses.StatusCodeResponses[code] = response
+	}
+
+	return nil
+}
+
+// ExpandResponseWithRoot expands a response based on a root document, not a fetchable document
+//
+// Notice that it is impossible to reference a json schema in a different document other than root
+// (use ExpandResponse to resolve external references).
+//
+// Setting the cache is optional and this parameter may safely be left to nil.
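+//
+// Caller-side sketch (illustrative only; rootDoc stands for the unmarshaled root document):
+//
+//	var resp spec.Response
+//	// resolves $ref's such as "#/definitions/Error" against rootDoc
+//	err := spec.ExpandResponseWithRoot(&resp, rootDoc, nil) // nil cache: a default is used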
+func ExpandResponseWithRoot(response *Response, root interface{}, cache ResolutionCache) error { + cache = cacheOrDefault(cache) + opts := &ExpandOptions{ + RelativeBase: baseForRoot(root, cache), + } + resolver := defaultSchemaLoader(root, opts, cache, nil) + + return expandParameterOrResponse(response, resolver, opts.RelativeBase) +} + +// ExpandResponse expands a response based on a basepath +// +// All refs inside response will be resolved relative to basePath +func ExpandResponse(response *Response, basePath string) error { + opts := optionsOrDefault(&ExpandOptions{ + RelativeBase: basePath, + }) + resolver := defaultSchemaLoader(nil, opts, nil, nil) + + return expandParameterOrResponse(response, resolver, opts.RelativeBase) +} + +// ExpandParameterWithRoot expands a parameter based on a root document, not a fetchable document. +// +// Notice that it is impossible to reference a json schema in a different document other than root +// (use ExpandParameter to resolve external references). +func ExpandParameterWithRoot(parameter *Parameter, root interface{}, cache ResolutionCache) error { + cache = cacheOrDefault(cache) + + opts := &ExpandOptions{ + RelativeBase: baseForRoot(root, cache), + } + resolver := defaultSchemaLoader(root, opts, cache, nil) + + return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) +} + +// ExpandParameter expands a parameter based on a basepath. +// This is the exported version of expandParameter +// all refs inside parameter will be resolved relative to basePath +func ExpandParameter(parameter *Parameter, basePath string) error { + opts := optionsOrDefault(&ExpandOptions{ + RelativeBase: basePath, + }) + resolver := defaultSchemaLoader(nil, opts, nil, nil) + + return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) +} + +func getRefAndSchema(input interface{}) (*Ref, *Schema, error) { + var ( + ref *Ref + sch *Schema + ) + + switch refable := input.(type) { + case *Parameter: + if refable == nil { + return nil, nil, nil + } + ref = &refable.Ref + sch = refable.Schema + case *Response: + if refable == nil { + return nil, nil, nil + } + ref = &refable.Ref + sch = refable.Schema + default: + return nil, nil, fmt.Errorf("unsupported type: %T: %w", input, ErrExpandUnsupportedType) + } + + return ref, sch, nil +} + +func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePath string) error { + ref, sch, err := getRefAndSchema(input) + if err != nil { + return err + } + + if ref == nil && sch == nil { // nothing to do + return nil + } + + parentRefs := make([]string, 0, 10) + if ref != nil { + // dereference this $ref + if err = resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) { + return err + } + + ref, sch, _ = getRefAndSchema(input) + } + + if ref.String() != "" { + transitiveResolver := resolver.transitiveResolver(basePath, *ref) + basePath = resolver.updateBasePath(transitiveResolver, basePath) + resolver = transitiveResolver + } + + if sch == nil { + // nothing to be expanded + if ref != nil { + *ref = Ref{} + } + + return nil + } + + if sch.Ref.String() != "" { + rebasedRef, ern := NewRef(normalizeURI(sch.Ref.String(), basePath)) + if ern != nil { + return ern + } + + if resolver.isCircular(&rebasedRef, basePath, parentRefs...) 
{ + // this is a circular $ref: stop expansion + if !resolver.options.AbsoluteCircularRef { + sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) + } else { + sch.Ref = rebasedRef + } + } + } + + // $ref expansion or rebasing is performed by expandSchema below + if ref != nil { + *ref = Ref{} + } + + // expand schema + // yes, we do it even if options.SkipSchema is true: we have to go down that rabbit hole and rebase nested $ref) + s, err := expandSchema(*sch, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return err + } + + if s != nil { // guard for when continuing on error + *sch = *s + } + + return nil +} diff --git a/vendor/github.com/go-openapi/spec/external_docs.go b/vendor/github.com/go-openapi/spec/external_docs.go new file mode 100644 index 0000000000..88add91b2b --- /dev/null +++ b/vendor/github.com/go-openapi/spec/external_docs.go @@ -0,0 +1,24 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +// ExternalDocumentation allows referencing an external resource for +// extended documentation. +// +// For more information: http://goo.gl/8us55a#externalDocumentationObject +type ExternalDocumentation struct { + Description string `json:"description,omitempty"` + URL string `json:"url,omitempty"` +} diff --git a/vendor/github.com/go-openapi/spec/header.go b/vendor/github.com/go-openapi/spec/header.go new file mode 100644 index 0000000000..9dfd17b185 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/header.go @@ -0,0 +1,203 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +const ( + jsonArray = "array" +) + +// HeaderProps describes a response header +type HeaderProps struct { + Description string `json:"description,omitempty"` +} + +// Header describes a header for a response of the API +// +// For more information: http://goo.gl/8us55a#headerObject +type Header struct { + CommonValidations + SimpleSchema + VendorExtensible + HeaderProps +} + +// ResponseHeader creates a new header instance for use in a response +func ResponseHeader() *Header { + return new(Header) +} + +// WithDescription sets the description on this response, allows for chaining +func (h *Header) WithDescription(description string) *Header { + h.Description = description + return h +} + +// Typed a fluent builder method for the type of parameter +func (h *Header) Typed(tpe, format string) *Header { + h.Type = tpe + h.Format = format + return h +} + +// CollectionOf a fluent builder method for an array item +func (h *Header) CollectionOf(items *Items, format string) *Header { + h.Type = jsonArray + h.Items = items + h.CollectionFormat = format + return h +} + +// WithDefault sets the default value on this item +func (h *Header) WithDefault(defaultValue interface{}) *Header { + h.Default = defaultValue + return h +} + +// WithMaxLength sets a max length value +func (h *Header) WithMaxLength(max int64) *Header { + h.MaxLength = &max + return h +} + +// WithMinLength sets a min length value +func (h *Header) WithMinLength(min int64) *Header { + h.MinLength = &min + return h +} + +// WithPattern sets a pattern value +func (h *Header) WithPattern(pattern string) *Header { + h.Pattern = pattern + return h +} + +// WithMultipleOf sets a multiple of value +func (h *Header) WithMultipleOf(number float64) *Header { + h.MultipleOf = &number + return h +} + +// WithMaximum sets a maximum number value +func (h *Header) WithMaximum(max float64, exclusive bool) *Header { + h.Maximum = &max + h.ExclusiveMaximum = exclusive + return h +} + +// WithMinimum sets a minimum number value +func (h *Header) WithMinimum(min float64, exclusive bool) *Header { + h.Minimum = &min + h.ExclusiveMinimum = exclusive + return h +} + +// WithEnum sets a the enum values (replace) +func (h *Header) WithEnum(values ...interface{}) *Header { + h.Enum = append([]interface{}{}, values...) 
+ return h +} + +// WithMaxItems sets the max items +func (h *Header) WithMaxItems(size int64) *Header { + h.MaxItems = &size + return h +} + +// WithMinItems sets the min items +func (h *Header) WithMinItems(size int64) *Header { + h.MinItems = &size + return h +} + +// UniqueValues dictates that this array can only have unique items +func (h *Header) UniqueValues() *Header { + h.UniqueItems = true + return h +} + +// AllowDuplicates this array can have duplicates +func (h *Header) AllowDuplicates() *Header { + h.UniqueItems = false + return h +} + +// WithValidations is a fluent method to set header validations +func (h *Header) WithValidations(val CommonValidations) *Header { + h.SetValidations(SchemaValidations{CommonValidations: val}) + return h +} + +// MarshalJSON marshal this to JSON +func (h Header) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(h.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(h.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(h.HeaderProps) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +// UnmarshalJSON unmarshals this header from JSON +func (h *Header) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &h.CommonValidations); err != nil { + return err + } + if err := json.Unmarshal(data, &h.SimpleSchema); err != nil { + return err + } + if err := json.Unmarshal(data, &h.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &h.HeaderProps) +} + +// JSONLookup look up a value by the json property name +func (h Header) JSONLookup(token string) (interface{}, error) { + if ex, ok := h.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(h.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(h.SimpleSchema, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(h.HeaderProps, token) + return r, err +} diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go new file mode 100644 index 0000000000..582f0fd4c4 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/info.go @@ -0,0 +1,184 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package spec
+
+import (
+	"encoding/json"
+	"strconv"
+	"strings"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag"
+)
+
+// Extensions vendor-specific extensions
+type Extensions map[string]interface{}
+
+// Add adds a value to these extensions
+func (e Extensions) Add(key string, value interface{}) {
+	realKey := strings.ToLower(key)
+	e[realKey] = value
+}
+
+// GetString gets a string value from the extensions
+func (e Extensions) GetString(key string) (string, bool) {
+	if v, ok := e[strings.ToLower(key)]; ok {
+		str, ok := v.(string)
+		return str, ok
+	}
+	return "", false
+}
+
+// GetInt gets an int value from the extensions
+func (e Extensions) GetInt(key string) (int, bool) {
+	realKey := strings.ToLower(key)
+
+	if v, ok := e.GetString(realKey); ok {
+		if r, err := strconv.Atoi(v); err == nil {
+			return r, true
+		}
+	}
+
+	if v, ok := e[realKey]; ok {
+		if r, rOk := v.(float64); rOk {
+			return int(r), true
+		}
+	}
+	return -1, false
+}
+
+// GetBool gets a bool value from the extensions
+func (e Extensions) GetBool(key string) (bool, bool) {
+	if v, ok := e[strings.ToLower(key)]; ok {
+		str, ok := v.(bool)
+		return str, ok
+	}
+	return false, false
+}
+
+// GetStringSlice gets a slice of strings from the extensions
+func (e Extensions) GetStringSlice(key string) ([]string, bool) {
+	if v, ok := e[strings.ToLower(key)]; ok {
+		arr, isSlice := v.([]interface{})
+		if !isSlice {
+			return nil, false
+		}
+		var strs []string
+		for _, iface := range arr {
+			str, isString := iface.(string)
+			if !isString {
+				return nil, false
+			}
+			strs = append(strs, str)
+		}
+		return strs, ok
+	}
+	return nil, false
+}
+
+// VendorExtensible composition block.
+type VendorExtensible struct {
+	Extensions Extensions
+}
+
+// AddExtension adds an extension to this extensible object
+func (v *VendorExtensible) AddExtension(key string, value interface{}) {
+	if value == nil {
+		return
+	}
+	if v.Extensions == nil {
+		v.Extensions = make(map[string]interface{})
+	}
+	v.Extensions.Add(key, value)
+}
+
+// MarshalJSON marshals the extensions to json
+func (v VendorExtensible) MarshalJSON() ([]byte, error) {
+	toser := make(map[string]interface{})
+	for k, v := range v.Extensions {
+		lk := strings.ToLower(k)
+		if strings.HasPrefix(lk, "x-") {
+			toser[k] = v
+		}
+	}
+	return json.Marshal(toser)
+}
+
+// UnmarshalJSON for this extensible object
+func (v *VendorExtensible) UnmarshalJSON(data []byte) error {
+	var d map[string]interface{}
+	if err := json.Unmarshal(data, &d); err != nil {
+		return err
+	}
+	for k, vv := range d {
+		lk := strings.ToLower(k)
+		if strings.HasPrefix(lk, "x-") {
+			if v.Extensions == nil {
+				v.Extensions = map[string]interface{}{}
+			}
+			v.Extensions[k] = vv
+		}
+	}
+	return nil
+}
+
+// InfoProps the properties for an info definition
+type InfoProps struct {
+	Description    string       `json:"description,omitempty"`
+	Title          string       `json:"title,omitempty"`
+	TermsOfService string       `json:"termsOfService,omitempty"`
+	Contact        *ContactInfo `json:"contact,omitempty"`
+	License        *License     `json:"license,omitempty"`
+	Version        string       `json:"version,omitempty"`
+}
+
+// Info object provides metadata about the API.
+// The metadata can be used by the clients if needed, and can be presented in the Swagger-UI for convenience.
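+//
+// A small construction sketch (illustrative values only, not part of the upstream source):
+//
+//	info := spec.Info{
+//		InfoProps: spec.InfoProps{
+//			Title:   "Example API",
+//			Version: "1.0.0",
+//		},
+//	}
+//	info.AddExtension("x-audience", "internal") // keys are lower-cased; only x-* keys are serialized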
+// +// For more information: http://goo.gl/8us55a#infoObject +type Info struct { + VendorExtensible + InfoProps +} + +// JSONLookup look up a value by the json property name +func (i Info) JSONLookup(token string) (interface{}, error) { + if ex, ok := i.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(i.InfoProps, token) + return r, err +} + +// MarshalJSON marshal this to JSON +func (i Info) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(i.InfoProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(i.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON marshal this from JSON +func (i *Info) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &i.InfoProps); err != nil { + return err + } + return json.Unmarshal(data, &i.VendorExtensible) +} diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go new file mode 100644 index 0000000000..e2afb2133b --- /dev/null +++ b/vendor/github.com/go-openapi/spec/items.go @@ -0,0 +1,234 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +const ( + jsonRef = "$ref" +) + +// SimpleSchema describe swagger simple schemas for parameters and headers +type SimpleSchema struct { + Type string `json:"type,omitempty"` + Nullable bool `json:"nullable,omitempty"` + Format string `json:"format,omitempty"` + Items *Items `json:"items,omitempty"` + CollectionFormat string `json:"collectionFormat,omitempty"` + Default interface{} `json:"default,omitempty"` + Example interface{} `json:"example,omitempty"` +} + +// TypeName return the type (or format) of a simple schema +func (s *SimpleSchema) TypeName() string { + if s.Format != "" { + return s.Format + } + return s.Type +} + +// ItemsTypeName yields the type of items in a simple schema array +func (s *SimpleSchema) ItemsTypeName() string { + if s.Items == nil { + return "" + } + return s.Items.TypeName() +} + +// Items a limited subset of JSON-Schema's items object. +// It is used by parameter definitions that are not located in "body". +// +// For more information: http://goo.gl/8us55a#items-object +type Items struct { + Refable + CommonValidations + SimpleSchema + VendorExtensible +} + +// NewItems creates a new instance of items +func NewItems() *Items { + return &Items{} +} + +// Typed a fluent builder method for the type of item +func (i *Items) Typed(tpe, format string) *Items { + i.Type = tpe + i.Format = format + return i +} + +// AsNullable flags this schema as nullable. 
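+//
+// Fluent-style sketch (illustrative only):
+//
+//	items := spec.NewItems().Typed("string", "").AsNullable().WithMinLength(1)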
+func (i *Items) AsNullable() *Items { + i.Nullable = true + return i +} + +// CollectionOf a fluent builder method for an array item +func (i *Items) CollectionOf(items *Items, format string) *Items { + i.Type = jsonArray + i.Items = items + i.CollectionFormat = format + return i +} + +// WithDefault sets the default value on this item +func (i *Items) WithDefault(defaultValue interface{}) *Items { + i.Default = defaultValue + return i +} + +// WithMaxLength sets a max length value +func (i *Items) WithMaxLength(max int64) *Items { + i.MaxLength = &max + return i +} + +// WithMinLength sets a min length value +func (i *Items) WithMinLength(min int64) *Items { + i.MinLength = &min + return i +} + +// WithPattern sets a pattern value +func (i *Items) WithPattern(pattern string) *Items { + i.Pattern = pattern + return i +} + +// WithMultipleOf sets a multiple of value +func (i *Items) WithMultipleOf(number float64) *Items { + i.MultipleOf = &number + return i +} + +// WithMaximum sets a maximum number value +func (i *Items) WithMaximum(max float64, exclusive bool) *Items { + i.Maximum = &max + i.ExclusiveMaximum = exclusive + return i +} + +// WithMinimum sets a minimum number value +func (i *Items) WithMinimum(min float64, exclusive bool) *Items { + i.Minimum = &min + i.ExclusiveMinimum = exclusive + return i +} + +// WithEnum sets a the enum values (replace) +func (i *Items) WithEnum(values ...interface{}) *Items { + i.Enum = append([]interface{}{}, values...) + return i +} + +// WithMaxItems sets the max items +func (i *Items) WithMaxItems(size int64) *Items { + i.MaxItems = &size + return i +} + +// WithMinItems sets the min items +func (i *Items) WithMinItems(size int64) *Items { + i.MinItems = &size + return i +} + +// UniqueValues dictates that this array can only have unique items +func (i *Items) UniqueValues() *Items { + i.UniqueItems = true + return i +} + +// AllowDuplicates this array can have duplicates +func (i *Items) AllowDuplicates() *Items { + i.UniqueItems = false + return i +} + +// WithValidations is a fluent method to set Items validations +func (i *Items) WithValidations(val CommonValidations) *Items { + i.SetValidations(SchemaValidations{CommonValidations: val}) + return i +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (i *Items) UnmarshalJSON(data []byte) error { + var validations CommonValidations + if err := json.Unmarshal(data, &validations); err != nil { + return err + } + var ref Refable + if err := json.Unmarshal(data, &ref); err != nil { + return err + } + var simpleSchema SimpleSchema + if err := json.Unmarshal(data, &simpleSchema); err != nil { + return err + } + var vendorExtensible VendorExtensible + if err := json.Unmarshal(data, &vendorExtensible); err != nil { + return err + } + i.Refable = ref + i.CommonValidations = validations + i.SimpleSchema = simpleSchema + i.VendorExtensible = vendorExtensible + return nil +} + +// MarshalJSON converts this items object to JSON +func (i Items) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(i.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(i.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(i.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(i.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b4, b3, b1, b2), nil +} + +// JSONLookup look up a value by the json property name +func (i Items) JSONLookup(token string) (interface{}, error) { + if 
token == jsonRef { + return &i.Ref, nil + } + + r, _, err := jsonpointer.GetForToken(i.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(i.SimpleSchema, token) + return r, err +} diff --git a/vendor/github.com/go-openapi/spec/license.go b/vendor/github.com/go-openapi/spec/license.go new file mode 100644 index 0000000000..b42f80368e --- /dev/null +++ b/vendor/github.com/go-openapi/spec/license.go @@ -0,0 +1,56 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag" +) + +// License information for the exposed API. +// +// For more information: http://goo.gl/8us55a#licenseObject +type License struct { + LicenseProps + VendorExtensible +} + +// LicenseProps holds the properties of a License object +type LicenseProps struct { + Name string `json:"name,omitempty"` + URL string `json:"url,omitempty"` +} + +// UnmarshalJSON hydrates License from json +func (l *License) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &l.LicenseProps); err != nil { + return err + } + return json.Unmarshal(data, &l.VendorExtensible) +} + +// MarshalJSON produces License as json +func (l License) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(l.LicenseProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(l.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} diff --git a/vendor/github.com/go-openapi/spec/normalizer.go b/vendor/github.com/go-openapi/spec/normalizer.go new file mode 100644 index 0000000000..e8b6009945 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/normalizer.go @@ -0,0 +1,202 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "net/url" + "path" + "strings" +) + +const fileScheme = "file" + +// normalizeURI ensures that all $ref paths used internally by the expander are canonicalized. +// +// NOTE(windows): there is a tolerance over the strict URI format on windows. +// +// The normalizer accepts relative file URLs like 'Path\File.JSON' as well as absolute file URLs like +// 'C:\Path\file.Yaml'. 
+//
+// Both are canonicalized with a "file://" scheme, slashes and a lower-cased path:
+// 'file:///c:/path/file.yaml'
+//
+// URLs can be specified with a file scheme, like in 'file:///folder/file.json' or
+// 'file:///c:\folder\File.json'.
+//
+// URLs like file://C:\folder are considered invalid (i.e. there is no host 'c:\folder') and a "repair"
+// is attempted.
+//
+// The base path argument is assumed to be canonicalized (e.g. using normalizeBase()).
+func normalizeURI(refPath, base string) string {
+	refURL, err := parseURL(refPath)
+	if err != nil {
+		specLogger.Printf("warning: invalid URI in $ref %q: %v", refPath, err)
+		refURL, refPath = repairURI(refPath)
+	}
+
+	fixWindowsURI(refURL, refPath) // noop on non-windows OS
+
+	refURL.Path = path.Clean(refURL.Path)
+	if refURL.Path == "." {
+		refURL.Path = ""
+	}
+
+	r := MustCreateRef(refURL.String())
+	if r.IsCanonical() {
+		return refURL.String()
+	}
+
+	baseURL, _ := parseURL(base)
+	if path.IsAbs(refURL.Path) {
+		baseURL.Path = refURL.Path
+	} else if refURL.Path != "" {
+		baseURL.Path = path.Join(path.Dir(baseURL.Path), refURL.Path)
+	}
+	// copying fragment from ref to base
+	baseURL.Fragment = refURL.Fragment
+
+	return baseURL.String()
+}
+
+// denormalizeRef returns the simplest notation for a normalized $ref, given the path of the original root document.
+//
+// When calling this, we assume that:
+// * $ref is a canonical URI
+// * originalRelativeBase is a canonical URI
+//
+// denormalizeRef is currently used when we rewrite a $ref after a circular $ref has been detected.
+// In this case, expansion stops and normally renders the internal canonical $ref.
+//
+// This internal $ref is eventually rebased to the original RelativeBase used for the expansion.
+//
+// There is a special case for schemas that are anchored with an "id":
+// in that case, the rebasing is performed against the id only if this is an anchor for the initial root document.
+// All other intermediate "id"'s found along the way are ignored for the purpose of rebasing.
+func denormalizeRef(ref *Ref, originalRelativeBase, id string) Ref {
+	debugLog("denormalizeRef called:\n$ref: %q\noriginal: %s\nroot ID:%s", ref.String(), originalRelativeBase, id)
+
+	if ref.String() == "" || ref.IsRoot() || ref.HasFragmentOnly {
+		// short circuit: $ref to current doc
+		return *ref
+	}
+
+	if id != "" {
+		idBaseURL, err := parseURL(id)
+		if err == nil { // if the schema id is not usable as a URI, ignore it
+			if ref, ok := rebase(ref, idBaseURL, true); ok { // rebase, but keep references to root unchanged (do not want $ref: "")
+				// $ref relative to the ID of the schema in the root document
+				return ref
+			}
+		}
+	}
+
+	originalRelativeBaseURL, _ := parseURL(originalRelativeBase)
+
+	r, _ := rebase(ref, originalRelativeBaseURL, false)
+
+	return r
+}
+
+func rebase(ref *Ref, v *url.URL, notEqual bool) (Ref, bool) {
+	var newBase url.URL
+
+	u := ref.GetURL()
+
+	if u.Scheme != v.Scheme || u.Host != v.Host {
+		return *ref, false
+	}
+
+	docPath := v.Path
+	v.Path = path.Dir(v.Path)
+
+	if v.Path == "."
{ + v.Path = "" + } else if !strings.HasSuffix(v.Path, "/") { + v.Path += "/" + } + + newBase.Fragment = u.Fragment + + if strings.HasPrefix(u.Path, docPath) { + newBase.Path = strings.TrimPrefix(u.Path, docPath) + } else { + newBase.Path = strings.TrimPrefix(u.Path, v.Path) + } + + if notEqual && newBase.Path == "" && newBase.Fragment == "" { + // do not want rebasing to end up in an empty $ref + return *ref, false + } + + if path.IsAbs(newBase.Path) { + // whenever we end up with an absolute path, specify the scheme and host + newBase.Scheme = v.Scheme + newBase.Host = v.Host + } + + return MustCreateRef(newBase.String()), true +} + +// normalizeRef canonicalize a Ref, using a canonical relativeBase as its absolute anchor +func normalizeRef(ref *Ref, relativeBase string) *Ref { + r := MustCreateRef(normalizeURI(ref.String(), relativeBase)) + return &r +} + +// normalizeBase performs a normalization of the input base path. +// +// This always yields a canonical URI (absolute), usable for the document cache. +// +// It ensures that all further internal work on basePath may safely assume +// a non-empty, cross-platform, canonical URI (i.e. absolute). +// +// This normalization tolerates windows paths (e.g. C:\x\y\File.dat) and transform this +// in a file:// URL with lower cased drive letter and path. +// +// See also: https://en.wikipedia.org/wiki/File_URI_scheme +func normalizeBase(in string) string { + u, err := parseURL(in) + if err != nil { + specLogger.Printf("warning: invalid URI in RelativeBase %q: %v", in, err) + u, in = repairURI(in) + } + + u.Fragment = "" // any fragment in the base is irrelevant + + fixWindowsURI(u, in) // noop on non-windows OS + + u.Path = path.Clean(u.Path) + if u.Path == "." { // empty after Clean() + u.Path = "" + } + + if u.Scheme != "" { + if path.IsAbs(u.Path) || u.Scheme != fileScheme { + // this is absolute or explicitly not a local file: we're good + return u.String() + } + } + + // no scheme or file scheme with relative path: assume file and make it absolute + // enforce scheme file://... with absolute path. + // + // If the input path is relative, we anchor the path to the current working directory. + // NOTE: we may end up with a host component. Leave it unchanged: e.g. file://host/folder/file.json + + u.Scheme = fileScheme + u.Path = absPath(u.Path) // platform-dependent + u.RawQuery = "" // any query component is irrelevant for a base + return u.String() +} diff --git a/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go new file mode 100644 index 0000000000..f19f1a8fb6 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go @@ -0,0 +1,44 @@ +//go:build !windows +// +build !windows + +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "net/url" + "path/filepath" +) + +// absPath makes a file path absolute and compatible with a URI path component. 
+// +// The parameter must be a path, not an URI. +func absPath(in string) string { + anchored, err := filepath.Abs(in) + if err != nil { + specLogger.Printf("warning: could not resolve current working directory: %v", err) + return in + } + return anchored +} + +func repairURI(in string) (*url.URL, string) { + u, _ := parseURL("") + debugLog("repaired URI: original: %q, repaired: %q", in, "") + return u, "" +} + +func fixWindowsURI(_ *url.URL, _ string) { +} diff --git a/vendor/github.com/go-openapi/spec/normalizer_windows.go b/vendor/github.com/go-openapi/spec/normalizer_windows.go new file mode 100644 index 0000000000..a66c532dbc --- /dev/null +++ b/vendor/github.com/go-openapi/spec/normalizer_windows.go @@ -0,0 +1,154 @@ +// -build windows + +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "net/url" + "os" + "path" + "path/filepath" + "strings" +) + +// absPath makes a file path absolute and compatible with a URI path component +// +// The parameter must be a path, not an URI. +func absPath(in string) string { + // NOTE(windows): filepath.Abs exhibits a special behavior on windows for empty paths. + // See https://github.com/golang/go/issues/24441 + if in == "" { + in = "." + } + + anchored, err := filepath.Abs(in) + if err != nil { + specLogger.Printf("warning: could not resolve current working directory: %v", err) + return in + } + + pth := strings.ReplaceAll(strings.ToLower(anchored), `\`, `/`) + if !strings.HasPrefix(pth, "/") { + pth = "/" + pth + } + + return path.Clean(pth) +} + +// repairURI tolerates invalid file URIs with common typos +// such as 'file://E:\folder\file', that break the regular URL parser. +// +// Adopting the same defaults as for unixes (e.g. return an empty path) would +// result into a counter-intuitive result for that case (e.g. E:\folder\file is +// eventually resolved as the current directory). The repair will detect the missing "/". +// +// Note that this only works for the file scheme. +func repairURI(in string) (*url.URL, string) { + const prefix = fileScheme + "://" + if !strings.HasPrefix(in, prefix) { + // giving up: resolve to empty path + u, _ := parseURL("") + + return u, "" + } + + // attempt the repair, stripping the scheme should be sufficient + u, _ := parseURL(strings.TrimPrefix(in, prefix)) + debugLog("repaired URI: original: %q, repaired: %q", in, u.String()) + + return u, u.String() +} + +// fixWindowsURI tolerates an absolute file path on windows such as C:\Base\File.yaml or \\host\share\Base\File.yaml +// and makes it a canonical URI: file:///c:/base/file.yaml +// +// Catch 22 notes for Windows: +// +// * There may be a drive letter on windows (it is lower-cased) +// * There may be a share UNC, e.g. \\server\folder\data.xml +// * Paths are case insensitive +// * Paths may already contain slashes +// * Paths must be slashed +// +// NOTE: there is no escaping. "/" may be valid separators just like "\". 
+// We don't use ToSlash() (which escapes everything) because windows now also +// tolerates the use of "/". Hence, both C:\File.yaml and C:/File.yaml will work. +func fixWindowsURI(u *url.URL, in string) { + drive := filepath.VolumeName(in) + + if len(drive) > 0 { + if len(u.Scheme) == 1 && strings.EqualFold(u.Scheme, drive[:1]) { // a path with a drive letter + u.Scheme = fileScheme + u.Host = "" + u.Path = strings.Join([]string{drive, u.Opaque, u.Path}, `/`) // reconstruct the full path component (no fragment, no query) + } else if u.Host == "" && strings.HasPrefix(u.Path, drive) { // a path with a \\host volume + // NOTE: the special host@port syntax for UNC is not supported (yet) + u.Scheme = fileScheme + + // this is a modified version of filepath.Dir() to apply on the VolumeName itself + i := len(drive) - 1 + for i >= 0 && !os.IsPathSeparator(drive[i]) { + i-- + } + host := drive[:i] // \\host\share => host + + u.Path = strings.TrimPrefix(u.Path, host) + u.Host = strings.TrimPrefix(host, `\\`) + } + + u.Opaque = "" + u.Path = strings.ReplaceAll(strings.ToLower(u.Path), `\`, `/`) + + // ensure we form an absolute path + if !strings.HasPrefix(u.Path, "/") { + u.Path = "/" + u.Path + } + + u.Path = path.Clean(u.Path) + + return + } + + if u.Scheme == fileScheme { + // Handle dodgy cases for file://{...} URIs on windows. + // A canonical URI should always be followed by an absolute path. + // + // Examples: + // * file:///folder/file => valid, unchanged + // * file:///c:\folder\file => slashed + // * file:///./folder/file => valid, cleaned to remove the dot + // * file:///.\folder\file => remapped to cwd + // * file:///. => dodgy, remapped to / (consistent with the behavior on unix) + // * file:///.. => dodgy, remapped to / (consistent with the behavior on unix) + if (!path.IsAbs(u.Path) && !filepath.IsAbs(u.Path)) || (strings.HasPrefix(u.Path, `/.`) && strings.Contains(u.Path, `\`)) { + // ensure we form an absolute path + u.Path, _ = filepath.Abs(strings.TrimLeft(u.Path, `/`)) + if !strings.HasPrefix(u.Path, "/") { + u.Path = "/" + u.Path + } + } + u.Path = strings.ToLower(u.Path) + } + + // NOTE: lower case normalization does not propagate to inner resources, + // generated when rebasing: when joining a relative URI with a file to an absolute base, + // only the base is currently lower-cased. + // + // For now, we assume this is good enough for most use cases + // and try not to generate too many differences + // between the output produced on different platforms. + u.Path = path.Clean(strings.ReplaceAll(u.Path, `\`, `/`)) +} diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go new file mode 100644 index 0000000000..a69cca8814 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/operation.go @@ -0,0 +1,400 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package spec + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "sort" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +func init() { + gob.Register(map[string]interface{}{}) + gob.Register([]interface{}{}) +} + +// OperationProps describes an operation +// +// NOTES: +// - schemes, when present must be from [http, https, ws, wss]: see validate +// - Security is handled as a special case: see MarshalJSON function +type OperationProps struct { + Description string `json:"description,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` + Tags []string `json:"tags,omitempty"` + Summary string `json:"summary,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` + ID string `json:"operationId,omitempty"` + Deprecated bool `json:"deprecated,omitempty"` + Security []map[string][]string `json:"security,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` + Responses *Responses `json:"responses,omitempty"` +} + +// MarshalJSON takes care of serializing operation properties to JSON +// +// We use a custom marshaller here to handle a special case related to +// the Security field. We need to preserve zero length slice +// while omitting the field when the value is nil/unset. +func (op OperationProps) MarshalJSON() ([]byte, error) { + type Alias OperationProps + if op.Security == nil { + return json.Marshal(&struct { + Security []map[string][]string `json:"security,omitempty"` + *Alias + }{ + Security: op.Security, + Alias: (*Alias)(&op), + }) + } + return json.Marshal(&struct { + Security []map[string][]string `json:"security"` + *Alias + }{ + Security: op.Security, + Alias: (*Alias)(&op), + }) +} + +// Operation describes a single API operation on a path. +// +// For more information: http://goo.gl/8us55a#operationObject +type Operation struct { + VendorExtensible + OperationProps +} + +// SuccessResponse gets a success response model +func (o *Operation) SuccessResponse() (*Response, int, bool) { + if o.Responses == nil { + return nil, 0, false + } + + responseCodes := make([]int, 0, len(o.Responses.StatusCodeResponses)) + for k := range o.Responses.StatusCodeResponses { + if k >= 200 && k < 300 { + responseCodes = append(responseCodes, k) + } + } + if len(responseCodes) > 0 { + sort.Ints(responseCodes) + v := o.Responses.StatusCodeResponses[responseCodes[0]] + return &v, responseCodes[0], true + } + + return o.Responses.Default, 0, false +} + +// JSONLookup look up a value by the json property name +func (o Operation) JSONLookup(token string) (interface{}, error) { + if ex, ok := o.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(o.OperationProps, token) + return r, err +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (o *Operation) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &o.OperationProps); err != nil { + return err + } + return json.Unmarshal(data, &o.VendorExtensible) +} + +// MarshalJSON converts this items object to JSON +func (o Operation) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(o.OperationProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(o.VendorExtensible) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} + +// NewOperation creates a new operation instance.
+// It expects an ID as parameter, but passing no ID is also valid. +func NewOperation(id string) *Operation { + op := new(Operation) + op.ID = id + return op +} + +// WithID sets the ID property on this operation, allows for chaining. +func (o *Operation) WithID(id string) *Operation { + o.ID = id + return o +} + +// WithDescription sets the description on this operation, allows for chaining +func (o *Operation) WithDescription(description string) *Operation { + o.Description = description + return o +} + +// WithSummary sets the summary on this operation, allows for chaining +func (o *Operation) WithSummary(summary string) *Operation { + o.Summary = summary + return o +} + +// WithExternalDocs sets/removes the external docs for/from this operation. +// When you pass empty strings as params, the external documents will be removed. +// When you pass a non-empty value for either param, it will be set on the external docs object. +// So when you pass a non-empty description, you should also pass the url and vice versa. +func (o *Operation) WithExternalDocs(description, url string) *Operation { + if description == "" && url == "" { + o.ExternalDocs = nil + return o + } + + if o.ExternalDocs == nil { + o.ExternalDocs = &ExternalDocumentation{} + } + o.ExternalDocs.Description = description + o.ExternalDocs.URL = url + return o +} + +// Deprecate marks the operation as deprecated +func (o *Operation) Deprecate() *Operation { + o.Deprecated = true + return o +} + +// Undeprecate marks the operation as not deprecated +func (o *Operation) Undeprecate() *Operation { + o.Deprecated = false + return o +} + +// WithConsumes adds media types for incoming body values +func (o *Operation) WithConsumes(mediaTypes ...string) *Operation { + o.Consumes = append(o.Consumes, mediaTypes...) + return o +} + +// WithProduces adds media types for outgoing body values +func (o *Operation) WithProduces(mediaTypes ...string) *Operation { + o.Produces = append(o.Produces, mediaTypes...) + return o +} + +// WithTags adds tags for this operation +func (o *Operation) WithTags(tags ...string) *Operation { + o.Tags = append(o.Tags, tags...) + return o +} + +// AddParam adds a parameter to this operation; when a parameter for that location +// and with that name already exists, it will be replaced +func (o *Operation) AddParam(param *Parameter) *Operation { + if param == nil { + return o + } + + for i, p := range o.Parameters { + if p.Name == param.Name && p.In == param.In { + params := make([]Parameter, 0, len(o.Parameters)+1) + params = append(params, o.Parameters[:i]...) + params = append(params, *param) + params = append(params, o.Parameters[i+1:]...) + o.Parameters = params + + return o + } + } + + o.Parameters = append(o.Parameters, *param) + return o +} + +// RemoveParam removes a parameter from the operation +func (o *Operation) RemoveParam(name, in string) *Operation { + for i, p := range o.Parameters { + if p.Name == name && p.In == in { + o.Parameters = append(o.Parameters[:i], o.Parameters[i+1:]...) + return o + } + } + return o +} + +// SecuredWith adds a security scope to this operation. +func (o *Operation) SecuredWith(name string, scopes ...string) *Operation { + o.Security = append(o.Security, map[string][]string{name: scopes}) + return o +} + +// WithDefaultResponse adds a default response to the operation.
+// Passing a nil value will remove the response +func (o *Operation) WithDefaultResponse(response *Response) *Operation { + return o.RespondsWith(0, response) +} + +// RespondsWith adds a status code response to the operation. +// When the code is 0, the value of the response will be used as the default response value. +// When the value of the response is nil, it will be removed from the operation +func (o *Operation) RespondsWith(code int, response *Response) *Operation { + if o.Responses == nil { + o.Responses = new(Responses) + } + if code == 0 { + o.Responses.Default = response + return o + } + if response == nil { + delete(o.Responses.StatusCodeResponses, code) + return o + } + if o.Responses.StatusCodeResponses == nil { + o.Responses.StatusCodeResponses = make(map[int]Response) + } + o.Responses.StatusCodeResponses[code] = *response + return o +} + +type opsAlias OperationProps + +type gobAlias struct { + Security []map[string]struct { + List []string + Pad bool + } + Alias *opsAlias + SecurityIsEmpty bool +} + +// GobEncode provides a safe gob encoder for Operation, including empty security requirements +func (o Operation) GobEncode() ([]byte, error) { + raw := struct { + Ext VendorExtensible + Props OperationProps + }{ + Ext: o.VendorExtensible, + Props: o.OperationProps, + } + var b bytes.Buffer + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Operation, including empty security requirements +func (o *Operation) GobDecode(b []byte) error { + var raw struct { + Ext VendorExtensible + Props OperationProps + } + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + o.VendorExtensible = raw.Ext + o.OperationProps = raw.Props + return nil +} + +// GobEncode provides a safe gob encoder for OperationProps, including empty security requirements +func (op OperationProps) GobEncode() ([]byte, error) { + raw := gobAlias{ + Alias: (*opsAlias)(&op), + } + + var b bytes.Buffer + if op.Security == nil { + // nil security requirement + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + if len(op.Security) == 0 { + // empty, but non-nil security requirement + raw.SecurityIsEmpty = true + raw.Alias.Security = nil + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + raw.Security = make([]map[string]struct { + List []string + Pad bool + }, 0, len(op.Security)) + for _, req := range op.Security { + v := make(map[string]struct { + List []string + Pad bool + }, len(req)) + for k, val := range req { + v[k] = struct { + List []string + Pad bool + }{ + List: val, + } + } + raw.Security = append(raw.Security, v) + } + + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for OperationProps, including empty security requirements +func (op *OperationProps) GobDecode(b []byte) error { + var raw gobAlias + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + if raw.Alias == nil { + return nil + } + + switch { + case raw.SecurityIsEmpty: + // empty, but non-nil security requirement + raw.Alias.Security = []map[string][]string{} + case len(raw.Alias.Security) == 0: + // nil security requirement + raw.Alias.Security = nil + default: + raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) + for _, req := range raw.Security { + v := make(map[string][]string, len(req)) + for k, val := range req { + v[k] = make([]string, 0, len(val.List)) + v[k] =
append(v[k], val.List...) + } + raw.Alias.Security = append(raw.Alias.Security, v) + } + } + + *op = *(*OperationProps)(raw.Alias) + return nil +} diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go new file mode 100644 index 0000000000..bd4f1cdb07 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/parameter.go @@ -0,0 +1,326 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// QueryParam creates a query parameter +func QueryParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "query"}} +} + +// HeaderParam creates a header parameter; it is always required by default +func HeaderParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "header", Required: true}} +} + +// PathParam creates a path parameter; it is always required +func PathParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "path", Required: true}} +} + +// BodyParam creates a body parameter +func BodyParam(name string, schema *Schema) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}} +} + +// FormDataParam creates a formData parameter +func FormDataParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}} +} + +// FileParam creates a file parameter (in formData) +func FileParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, + SimpleSchema: SimpleSchema{Type: "file"}} +} + +// SimpleArrayParam creates a param for a simple array (string, int, date etc) +func SimpleArrayParam(name, tpe, fmt string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name}, + SimpleSchema: SimpleSchema{Type: jsonArray, CollectionFormat: "csv", + Items: &Items{SimpleSchema: SimpleSchema{Type: tpe, Format: fmt}}}} +} + +// ParamRef creates a parameter that's a json reference +func ParamRef(uri string) *Parameter { + p := new(Parameter) + p.Ref = MustCreateRef(uri) + return p +} + +// ParamProps describes the specific attributes of an operation parameter +// +// NOTE: +// - Schema is defined when "in" == "body": see validate +// - AllowEmptyValue is allowed where "in" == "query" || "formData" +type ParamProps struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + In string `json:"in,omitempty"` + Required bool `json:"required,omitempty"` + Schema *Schema `json:"schema,omitempty"` + AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` +} + +// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). +// +// There are five possible parameter types.
+// - Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part +// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, +// the path parameter is `itemId`. +// - Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. +// - Header - Custom headers that are expected as part of the request. +// - Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be +// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for +// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist +// together for the same operation. +// - Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or +// `multipart/form-data` are used as the content type of the request (in Swagger's definition, +// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used +// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be +// declared together with a body parameter for the same operation. Form parameters have a different format based on +// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). +// - `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. +// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple +// parameters that are being transferred. +// - `multipart/form-data` - each parameter takes a section in the payload with an internal header. +// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is +// `submit-name`. This type of form parameters is more commonly used for file transfers. 
+// +// For more information: http://goo.gl/8us55a#parameterObject +type Parameter struct { + Refable + CommonValidations + SimpleSchema + VendorExtensible + ParamProps +} + +// JSONLookup look up a value by the json property name +func (p Parameter) JSONLookup(token string) (interface{}, error) { + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + if token == jsonRef { + return &p.Ref, nil + } + + r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.ParamProps, token) + return r, err +} + +// WithDescription a fluent builder method for the description of the parameter +func (p *Parameter) WithDescription(description string) *Parameter { + p.Description = description + return p +} + +// Named a fluent builder method to override the name of the parameter +func (p *Parameter) Named(name string) *Parameter { + p.Name = name + return p +} + +// WithLocation a fluent builder method to override the location of the parameter +func (p *Parameter) WithLocation(in string) *Parameter { + p.In = in + return p +} + +// Typed a fluent builder method for the type of the parameter value +func (p *Parameter) Typed(tpe, format string) *Parameter { + p.Type = tpe + p.Format = format + return p +} + +// CollectionOf a fluent builder method for an array parameter +func (p *Parameter) CollectionOf(items *Items, format string) *Parameter { + p.Type = jsonArray + p.Items = items + p.CollectionFormat = format + return p +} + +// WithDefault sets the default value on this parameter +func (p *Parameter) WithDefault(defaultValue interface{}) *Parameter { + p.AsOptional() // with default implies optional + p.Default = defaultValue + return p +} + +// AllowsEmptyValues flags this parameter as being ok with empty values +func (p *Parameter) AllowsEmptyValues() *Parameter { + p.AllowEmptyValue = true + return p +} + +// NoEmptyValues flags this parameter as not liking empty values +func (p *Parameter) NoEmptyValues() *Parameter { + p.AllowEmptyValue = false + return p +} + +// AsOptional flags this parameter as optional +func (p *Parameter) AsOptional() *Parameter { + p.Required = false + return p +} + +// AsRequired flags this parameter as required +func (p *Parameter) AsRequired() *Parameter { + if p.Default != nil { // with a default required makes no sense + return p + } + p.Required = true + return p +} + +// WithMaxLength sets a max length value +func (p *Parameter) WithMaxLength(max int64) *Parameter { + p.MaxLength = &max + return p +} + +// WithMinLength sets a min length value +func (p *Parameter) WithMinLength(min int64) *Parameter { + p.MinLength = &min + return p +} + +// WithPattern sets a pattern value +func (p *Parameter) WithPattern(pattern string) *Parameter { + p.Pattern = pattern + return p +} + +// WithMultipleOf sets a multiple of value +func (p *Parameter) WithMultipleOf(number float64) *Parameter { + p.MultipleOf = &number + return p +} + +// WithMaximum sets a maximum number value +func (p *Parameter) WithMaximum(max float64, exclusive bool) *Parameter { + p.Maximum = &max + p.ExclusiveMaximum = exclusive + return p +} + +// WithMinimum sets a minimum number value +func (p *Parameter) WithMinimum(min float64, 
exclusive bool) *Parameter { + p.Minimum = &min + p.ExclusiveMinimum = exclusive + return p +} + +// WithEnum sets the enum values (replacing any existing values) +func (p *Parameter) WithEnum(values ...interface{}) *Parameter { + p.Enum = append([]interface{}{}, values...) + return p +} + +// WithMaxItems sets the max items +func (p *Parameter) WithMaxItems(size int64) *Parameter { + p.MaxItems = &size + return p +} + +// WithMinItems sets the min items +func (p *Parameter) WithMinItems(size int64) *Parameter { + p.MinItems = &size + return p +} + +// UniqueValues dictates that this array can only have unique items +func (p *Parameter) UniqueValues() *Parameter { + p.UniqueItems = true + return p +} + +// AllowDuplicates states that this array can have duplicates +func (p *Parameter) AllowDuplicates() *Parameter { + p.UniqueItems = false + return p +} + +// WithValidations is a fluent method to set parameter validations +func (p *Parameter) WithValidations(val CommonValidations) *Parameter { + p.SetValidations(SchemaValidations{CommonValidations: val}) + return p +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *Parameter) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &p.CommonValidations); err != nil { + return err + } + if err := json.Unmarshal(data, &p.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &p.SimpleSchema); err != nil { + return err + } + if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &p.ParamProps) +} + +// MarshalJSON converts this items object to JSON +func (p Parameter) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(p.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(p.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(p.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + b5, err := json.Marshal(p.ParamProps) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b3, b1, b2, b4, b5), nil +} diff --git a/vendor/github.com/go-openapi/spec/path_item.go b/vendor/github.com/go-openapi/spec/path_item.go new file mode 100644 index 0000000000..68fc8e9014 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/path_item.go @@ -0,0 +1,87 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
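Editor's note: a small usage sketch (not part of the vendored diff) for the fluent parameter builders defined above; the parameter name and bounds are illustrative.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// build a bounded integer query parameter; WithDefault implies AsOptional
	p := spec.QueryParam("limit").
		Typed("integer", "int32").
		WithDefault(20).
		WithMinimum(1, false).
		WithMaximum(100, false)

	b, err := json.Marshal(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// e.g. {"maximum":100,"minimum":1,"type":"integer","format":"int32","default":20,"name":"limit","in":"query"}
}
```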
+ +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// PathItemProps the path item specific properties +type PathItemProps struct { + Get *Operation `json:"get,omitempty"` + Put *Operation `json:"put,omitempty"` + Post *Operation `json:"post,omitempty"` + Delete *Operation `json:"delete,omitempty"` + Options *Operation `json:"options,omitempty"` + Head *Operation `json:"head,omitempty"` + Patch *Operation `json:"patch,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` +} + +// PathItem describes the operations available on a single path. +// A Path Item may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). +// The path itself is still exposed to the documentation viewer but they will +// not know which operations and parameters are available. +// +// For more information: http://goo.gl/8us55a#pathItemObject +type PathItem struct { + Refable + VendorExtensible + PathItemProps +} + +// JSONLookup look up a value by the json property name +func (p PathItem) JSONLookup(token string) (interface{}, error) { + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + if token == jsonRef { + return &p.Ref, nil + } + r, _, err := jsonpointer.GetForToken(p.PathItemProps, token) + return r, err +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *PathItem) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &p.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &p.PathItemProps) +} + +// MarshalJSON converts this items object to JSON +func (p PathItem) MarshalJSON() ([]byte, error) { + b3, err := json.Marshal(p.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + b5, err := json.Marshal(p.PathItemProps) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b3, b4, b5) + return concated, nil +} diff --git a/vendor/github.com/go-openapi/spec/paths.go b/vendor/github.com/go-openapi/spec/paths.go new file mode 100644 index 0000000000..9dc82a2901 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/paths.go @@ -0,0 +1,97 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/go-openapi/swag" +) + +// Paths holds the relative paths to the individual endpoints. +// The path is appended to the [`basePath`](http://goo.gl/8us55a#swaggerBasePath) in order +// to construct the full URL. +// The Paths may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). 
+// +// For more information: http://goo.gl/8us55a#pathsObject +type Paths struct { + VendorExtensible + Paths map[string]PathItem `json:"-"` // custom serializer to flatten this, each entry must start with "/" +} + +// JSONLookup look up a value by the json property name +func (p Paths) JSONLookup(token string) (interface{}, error) { + if pi, ok := p.Paths[token]; ok { + return &pi, nil + } + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + return nil, fmt.Errorf("object has no field %q", token) +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *Paths) UnmarshalJSON(data []byte) error { + var res map[string]json.RawMessage + if err := json.Unmarshal(data, &res); err != nil { + return err + } + for k, v := range res { + if strings.HasPrefix(strings.ToLower(k), "x-") { + if p.Extensions == nil { + p.Extensions = make(map[string]interface{}) + } + var d interface{} + if err := json.Unmarshal(v, &d); err != nil { + return err + } + p.Extensions[k] = d + } + if strings.HasPrefix(k, "/") { + if p.Paths == nil { + p.Paths = make(map[string]PathItem) + } + var pi PathItem + if err := json.Unmarshal(v, &pi); err != nil { + return err + } + p.Paths[k] = pi + } + } + return nil +} + +// MarshalJSON converts this items object to JSON +func (p Paths) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + + pths := make(map[string]PathItem) + for k, v := range p.Paths { + if strings.HasPrefix(k, "/") { + pths[k] = v + } + } + b2, err := json.Marshal(pths) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} diff --git a/vendor/github.com/go-openapi/spec/properties.go b/vendor/github.com/go-openapi/spec/properties.go new file mode 100644 index 0000000000..91d2435f01 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/properties.go @@ -0,0 +1,91 @@ +package spec + +import ( + "bytes" + "encoding/json" + "reflect" + "sort" +) + +// OrderSchemaItem holds a named schema (e.g. from a property of an object) +type OrderSchemaItem struct { + Name string + Schema +} + +// OrderSchemaItems is a sortable slice of named schemas. +// The ordering is defined by the x-order schema extension. +type OrderSchemaItems []OrderSchemaItem + +// MarshalJSON produces a json object with keys defined by the names of the schemas +// in the OrderSchemaItems slice, keeping the original order of the slice.
+func (items OrderSchemaItems) MarshalJSON() ([]byte, error) { + buf := bytes.NewBuffer(nil) + buf.WriteString("{") + for i := range items { + if i > 0 { + buf.WriteString(",") + } + buf.WriteString("\"") + buf.WriteString(items[i].Name) + buf.WriteString("\":") + bs, err := json.Marshal(&items[i].Schema) + if err != nil { + return nil, err + } + buf.Write(bs) + } + buf.WriteString("}") + return buf.Bytes(), nil +} + +func (items OrderSchemaItems) Len() int { return len(items) } +func (items OrderSchemaItems) Swap(i, j int) { items[i], items[j] = items[j], items[i] } +func (items OrderSchemaItems) Less(i, j int) (ret bool) { + ii, oki := items[i].Extensions.GetInt("x-order") + ij, okj := items[j].Extensions.GetInt("x-order") + if oki { + if okj { + defer func() { + if err := recover(); err != nil { + defer func() { + if err = recover(); err != nil { + ret = items[i].Name < items[j].Name + } + }() + ret = reflect.ValueOf(ii).String() < reflect.ValueOf(ij).String() + } + }() + return ii < ij + } + return true + } else if okj { + return false + } + return items[i].Name < items[j].Name +} + +// SchemaProperties is a map representing the properties of a Schema object. +// It knows how to transform its keys into an ordered slice. +type SchemaProperties map[string]Schema + +// ToOrderedSchemaItems transforms the map of properties into a sortable slice +func (properties SchemaProperties) ToOrderedSchemaItems() OrderSchemaItems { + items := make(OrderSchemaItems, 0, len(properties)) + for k, v := range properties { + items = append(items, OrderSchemaItem{ + Name: k, + Schema: v, + }) + } + sort.Sort(items) + return items +} + +// MarshalJSON produces properties as json, keeping their order. +func (properties SchemaProperties) MarshalJSON() ([]byte, error) { + if properties == nil { + return []byte("null"), nil + } + return json.Marshal(properties.ToOrderedSchemaItems()) +} diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go new file mode 100644 index 0000000000..b0ef9bd9c9 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/ref.go @@ -0,0 +1,193 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
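Editor's note: a sketch (not part of the vendored diff) of the deterministic property ordering implemented above; with no x-order extension, OrderSchemaItems.Less falls back to the property name.

```go
package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	props := spec.SchemaProperties{
		"zeta":  *spec.StringProperty(),
		"alpha": *spec.Int64Property(),
	}

	// MarshalJSON sorts via ToOrderedSchemaItems, so Go map ordering never leaks
	b, err := props.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// {"alpha":{"type":"integer","format":"int64"},"zeta":{"type":"string"}}
}
```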
+ +package spec + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "net/http" + "os" + "path/filepath" + + "github.com/go-openapi/jsonreference" +) + +// Refable is a struct for things that accept a $ref property +type Refable struct { + Ref Ref +} + +// MarshalJSON marshals the ref to json +func (r Refable) MarshalJSON() ([]byte, error) { + return r.Ref.MarshalJSON() +} + +// UnmarshalJSON unmarshalss the ref from json +func (r *Refable) UnmarshalJSON(d []byte) error { + return json.Unmarshal(d, &r.Ref) +} + +// Ref represents a json reference that is potentially resolved +type Ref struct { + jsonreference.Ref +} + +// RemoteURI gets the remote uri part of the ref +func (r *Ref) RemoteURI() string { + if r.String() == "" { + return "" + } + + u := *r.GetURL() + u.Fragment = "" + return u.String() +} + +// IsValidURI returns true when the url the ref points to can be found +func (r *Ref) IsValidURI(basepaths ...string) bool { + if r.String() == "" { + return true + } + + v := r.RemoteURI() + if v == "" { + return true + } + + if r.HasFullURL { + //nolint:noctx,gosec + rr, err := http.Get(v) + if err != nil { + return false + } + defer rr.Body.Close() + + return rr.StatusCode/100 == 2 + } + + if !(r.HasFileScheme || r.HasFullFilePath || r.HasURLPathOnly) { + return false + } + + // check for local file + pth := v + if r.HasURLPathOnly { + base := "." + if len(basepaths) > 0 { + base = filepath.Dir(filepath.Join(basepaths...)) + } + p, e := filepath.Abs(filepath.ToSlash(filepath.Join(base, pth))) + if e != nil { + return false + } + pth = p + } + + fi, err := os.Stat(filepath.ToSlash(pth)) + if err != nil { + return false + } + + return !fi.IsDir() +} + +// Inherits creates a new reference from a parent and a child +// If the child cannot inherit from the parent, an error is returned +func (r *Ref) Inherits(child Ref) (*Ref, error) { + ref, err := r.Ref.Inherits(child.Ref) + if err != nil { + return nil, err + } + return &Ref{Ref: *ref}, nil +} + +// NewRef creates a new instance of a ref object +// returns an error when the reference uri is an invalid uri +func NewRef(refURI string) (Ref, error) { + ref, err := jsonreference.New(refURI) + if err != nil { + return Ref{}, err + } + return Ref{Ref: ref}, nil +} + +// MustCreateRef creates a ref object but panics when refURI is invalid. +// Use the NewRef method for a version that returns an error. 
+func MustCreateRef(refURI string) Ref { + return Ref{Ref: jsonreference.MustCreateRef(refURI)} +} + +// MarshalJSON marshals this ref into a JSON object +func (r Ref) MarshalJSON() ([]byte, error) { + str := r.String() + if str == "" { + if r.IsRoot() { + return []byte(`{"$ref":""}`), nil + } + return []byte("{}"), nil + } + v := map[string]interface{}{"$ref": str} + return json.Marshal(v) +} + +// UnmarshalJSON unmarshals this ref from a JSON object +func (r *Ref) UnmarshalJSON(d []byte) error { + var v map[string]interface{} + if err := json.Unmarshal(d, &v); err != nil { + return err + } + return r.fromMap(v) +} + +// GobEncode provides a safe gob encoder for Ref +func (r Ref) GobEncode() ([]byte, error) { + var b bytes.Buffer + raw, err := r.MarshalJSON() + if err != nil { + return nil, err + } + err = gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Ref +func (r *Ref) GobDecode(b []byte) error { + var raw []byte + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + return json.Unmarshal(raw, r) +} + +func (r *Ref) fromMap(v map[string]interface{}) error { + if v == nil { + return nil + } + + if vv, ok := v["$ref"]; ok { + if str, ok := vv.(string); ok { + ref, err := jsonreference.New(str) + if err != nil { + return err + } + *r = Ref{Ref: ref} + } + } + + return nil +} diff --git a/vendor/github.com/go-openapi/spec/resolver.go b/vendor/github.com/go-openapi/spec/resolver.go new file mode 100644 index 0000000000..47d1ee13fc --- /dev/null +++ b/vendor/github.com/go-openapi/spec/resolver.go @@ -0,0 +1,127 @@ +package spec + +import ( + "fmt" + + "github.com/go-openapi/swag" +) + +func resolveAnyWithBase(root interface{}, ref *Ref, result interface{}, options *ExpandOptions) error { + options = optionsOrDefault(options) + resolver := defaultSchemaLoader(root, options, nil, nil) + + if err := resolver.Resolve(ref, result, options.RelativeBase); err != nil { + return err + } + + return nil +} + +// ResolveRefWithBase resolves a reference against a context root with preservation of base path +func ResolveRefWithBase(root interface{}, ref *Ref, options *ExpandOptions) (*Schema, error) { + result := new(Schema) + + if err := resolveAnyWithBase(root, ref, result, options); err != nil { + return nil, err + } + + return result, nil +} + +// ResolveRef resolves a reference for a schema against a context root +// ref is guaranteed to be in root (no need to go to external files) +// +// ResolveRef is ONLY called from the code generation module +func ResolveRef(root interface{}, ref *Ref) (*Schema, error) { + res, _, err := ref.GetPointer().Get(root) + if err != nil { + return nil, err + } + + switch sch := res.(type) { + case Schema: + return &sch, nil + case *Schema: + return sch, nil + case map[string]interface{}: + newSch := new(Schema) + if err = swag.DynamicJSONToStruct(sch, newSch); err != nil { + return nil, err + } + return newSch, nil + default: + return nil, fmt.Errorf("type: %T: %w", sch, ErrUnknownTypeForReference) + } +} + +// ResolveParameterWithBase resolves a parameter reference against a context root and base path +func ResolveParameterWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Parameter, error) { + result := new(Parameter) + + if err := resolveAnyWithBase(root, &ref, result, options); err != nil { + return nil, err + } + + return result, nil +} + +// ResolveParameter resolves a parameter reference against a context root +func ResolveParameter(root 
interface{}, ref Ref) (*Parameter, error) { + return ResolveParameterWithBase(root, ref, nil) +} + +// ResolveResponseWithBase resolves a response reference against a context root and base path +func ResolveResponseWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Response, error) { + result := new(Response) + + err := resolveAnyWithBase(root, &ref, result, options) + if err != nil { + return nil, err + } + + return result, nil +} + +// ResolveResponse resolves a response reference against a context root +func ResolveResponse(root interface{}, ref Ref) (*Response, error) { + return ResolveResponseWithBase(root, ref, nil) +} + +// ResolvePathItemWithBase resolves a path item reference against a context root and base path +func ResolvePathItemWithBase(root interface{}, ref Ref, options *ExpandOptions) (*PathItem, error) { + result := new(PathItem) + + if err := resolveAnyWithBase(root, &ref, result, options); err != nil { + return nil, err + } + + return result, nil +} + +// ResolvePathItem resolves a path item reference against a context root and base path +// +// Deprecated: use ResolvePathItemWithBase instead +func ResolvePathItem(root interface{}, ref Ref, options *ExpandOptions) (*PathItem, error) { + return ResolvePathItemWithBase(root, ref, options) +} + +// ResolveItemsWithBase resolves a parameter items reference against a context root and base path. +// +// NOTE: strictly speaking, this construct is not supported by Swagger 2.0. +// Similarly, $ref is forbidden in response headers. +func ResolveItemsWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Items, error) { + result := new(Items) + + if err := resolveAnyWithBase(root, &ref, result, options); err != nil { + return nil, err + } + + return result, nil +} + +// ResolveItems resolves a parameter items reference against a context root and base path. +// +// Deprecated: use ResolveItemsWithBase instead +func ResolveItems(root interface{}, ref Ref, options *ExpandOptions) (*Items, error) { + return ResolveItemsWithBase(root, ref, options) +} diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go new file mode 100644 index 0000000000..0340b60d84 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/response.go @@ -0,0 +1,152 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// ResponseProps properties specific to a response +type ResponseProps struct { + Description string `json:"description"` + Schema *Schema `json:"schema,omitempty"` + Headers map[string]Header `json:"headers,omitempty"` + Examples map[string]interface{} `json:"examples,omitempty"` +} + +// Response describes a single response from an API Operation.
+// +// For more information: http://goo.gl/8us55a#responseObject +type Response struct { + Refable + ResponseProps + VendorExtensible +} + +// JSONLookup look up a value by the json property name +func (r Response) JSONLookup(token string) (interface{}, error) { + if ex, ok := r.Extensions[token]; ok { + return &ex, nil + } + if token == "$ref" { + return &r.Ref, nil + } + ptr, _, err := jsonpointer.GetForToken(r.ResponseProps, token) + return ptr, err +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (r *Response) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &r.ResponseProps); err != nil { + return err + } + if err := json.Unmarshal(data, &r.Refable); err != nil { + return err + } + return json.Unmarshal(data, &r.VendorExtensible) +} + +// MarshalJSON converts this items object to JSON +func (r Response) MarshalJSON() ([]byte, error) { + var ( + b1 []byte + err error + ) + + if r.Ref.String() == "" { + // when there is no $ref, empty description is rendered as an empty string + b1, err = json.Marshal(r.ResponseProps) + } else { + // when there is $ref inside the schema, description should be omitempty-ied + b1, err = json.Marshal(struct { + Description string `json:"description,omitempty"` + Schema *Schema `json:"schema,omitempty"` + Headers map[string]Header `json:"headers,omitempty"` + Examples map[string]interface{} `json:"examples,omitempty"` + }{ + Description: r.ResponseProps.Description, + Schema: r.ResponseProps.Schema, + Examples: r.ResponseProps.Examples, + }) + } + if err != nil { + return nil, err + } + + b2, err := json.Marshal(r.Refable) + if err != nil { + return nil, err + } + b3, err := json.Marshal(r.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +// NewResponse creates a new response instance +func NewResponse() *Response { + return new(Response) +} + +// ResponseRef creates a response as a json reference +func ResponseRef(url string) *Response { + resp := NewResponse() + resp.Ref = MustCreateRef(url) + return resp +} + +// WithDescription sets the description on this response, allows for chaining +func (r *Response) WithDescription(description string) *Response { + r.Description = description + return r +} + +// WithSchema sets the schema on this response, allows for chaining. 
+// Passing a nil argument removes the schema from this response +func (r *Response) WithSchema(schema *Schema) *Response { + r.Schema = schema + return r +} + +// AddHeader adds a header to this response +func (r *Response) AddHeader(name string, header *Header) *Response { + if header == nil { + return r.RemoveHeader(name) + } + if r.Headers == nil { + r.Headers = make(map[string]Header) + } + r.Headers[name] = *header + return r +} + +// RemoveHeader removes a header from this response +func (r *Response) RemoveHeader(name string) *Response { + delete(r.Headers, name) + return r +} + +// AddExample adds an example to this response +func (r *Response) AddExample(mediaType string, example interface{}) *Response { + if r.Examples == nil { + r.Examples = make(map[string]interface{}) + } + r.Examples[mediaType] = example + return r +} diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go new file mode 100644 index 0000000000..16c3076fe8 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/responses.go @@ -0,0 +1,140 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/go-openapi/swag" +) + +// Responses is a container for the expected responses of an operation. +// The container maps an HTTP response code to the expected response. +// It is not expected from the documentation to necessarily cover all possible HTTP response codes, +// since they may not be known in advance. However, it is expected from the documentation to cover +// a successful operation response and any known errors. +// +// The `default` can be used as a default response object for all HTTP codes that are not covered +// individually by the specification. +// +// The `Responses Object` MUST contain at least one response code, and it SHOULD be the response +// for a successful operation call.
+// +// For more information: http://goo.gl/8us55a#responsesObject +type Responses struct { + VendorExtensible + ResponsesProps +} + +// JSONLookup implements an interface to customize json pointer lookup +func (r Responses) JSONLookup(token string) (interface{}, error) { + if token == "default" { + return r.Default, nil + } + if ex, ok := r.Extensions[token]; ok { + return &ex, nil + } + if i, err := strconv.Atoi(token); err == nil { + if scr, ok := r.StatusCodeResponses[i]; ok { + return scr, nil + } + } + return nil, fmt.Errorf("object has no field %q", token) +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (r *Responses) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { + return err + } + + if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { + return err + } + if reflect.DeepEqual(ResponsesProps{}, r.ResponsesProps) { + r.ResponsesProps = ResponsesProps{} + } + return nil +} + +// MarshalJSON converts this items object to JSON +func (r Responses) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(r.ResponsesProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(r.VendorExtensible) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} + +// ResponsesProps describes all responses for an operation. +// It tells what the default response is and maps all responses with an +// HTTP status code. +type ResponsesProps struct { + Default *Response + StatusCodeResponses map[int]Response +} + +// MarshalJSON marshals responses as JSON +func (r ResponsesProps) MarshalJSON() ([]byte, error) { + toser := map[string]Response{} + if r.Default != nil { + toser["default"] = *r.Default + } + for k, v := range r.StatusCodeResponses { + toser[strconv.Itoa(k)] = v + } + return json.Marshal(toser) +} + +// UnmarshalJSON unmarshals responses from JSON +func (r *ResponsesProps) UnmarshalJSON(data []byte) error { + var res map[string]json.RawMessage + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + if v, ok := res["default"]; ok { + var defaultRes Response + if err := json.Unmarshal(v, &defaultRes); err != nil { + return err + } + r.Default = &defaultRes + delete(res, "default") + } + for k, v := range res { + if !strings.HasPrefix(k, "x-") { + var statusCodeResp Response + if err := json.Unmarshal(v, &statusCodeResp); err != nil { + return err + } + if nk, err := strconv.Atoi(k); err == nil { + if r.StatusCodeResponses == nil { + r.StatusCodeResponses = map[int]Response{} + } + r.StatusCodeResponses[nk] = statusCodeResp + } + } + } + return nil +} diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go new file mode 100644 index 0000000000..4e9be8576b --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schema.go @@ -0,0 +1,645 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
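Editor's note: a usage sketch (not part of the vendored diff) tying Operation.RespondsWith to the Responses serialization above; a code of 0 maps to the "default" key, as implemented in responses.go.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	op := spec.NewOperation("listItems").
		RespondsWith(200, spec.NewResponse().WithDescription("OK")).
		RespondsWith(0, spec.NewResponse().WithDescription("unexpected error")) // default

	b, err := json.Marshal(op.Responses)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// {"200":{"description":"OK"},"default":{"description":"unexpected error"}}
}
```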
+ +package spec + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// BooleanProperty creates a boolean property +func BooleanProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"boolean"}}} +} + +// BoolProperty creates a boolean property +func BoolProperty() *Schema { return BooleanProperty() } + +// StringProperty creates a string property +func StringProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} +} + +// CharProperty creates a string property +func CharProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} +} + +// Float64Property creates a float64/double property +func Float64Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "double"}} +} + +// Float32Property creates a float32/float property +func Float32Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "float"}} +} + +// Int8Property creates an int8 property +func Int8Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int8"}} +} + +// Int16Property creates an int16 property +func Int16Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int16"}} +} + +// Int32Property creates an int32 property +func Int32Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int32"}} +} + +// Int64Property creates an int64 property +func Int64Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}} +} + +// StrFmtProperty creates a property for the named string format +func StrFmtProperty(format string) *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: format}} +} + +// DateProperty creates a date property +func DateProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date"}} +} + +// DateTimeProperty creates a date time property +func DateTimeProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date-time"}} +} + +// MapProperty creates a map property +func MapProperty(property *Schema) *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, + AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} +} + +// RefProperty creates a ref property +func RefProperty(name string) *Schema { + return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} +} + +// RefSchema creates a ref property +func RefSchema(name string) *Schema { + return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} +} + +// ArrayProperty creates an array property +func ArrayProperty(items *Schema) *Schema { + if items == nil { + return &Schema{SchemaProps: SchemaProps{Type: []string{"array"}}} + } + return &Schema{SchemaProps: SchemaProps{Items: &SchemaOrArray{Schema: items}, Type: []string{"array"}}} +} + +// ComposedSchema creates a schema with allOf +func ComposedSchema(schemas ...Schema) *Schema { + s := new(Schema) + s.AllOf = schemas + return s +} + +// SchemaURL represents a schema url +type SchemaURL string + +// MarshalJSON marshal this to JSON +func (r SchemaURL) MarshalJSON() ([]byte, error) { + if r == "" { + return []byte("{}"), nil + } + v := map[string]interface{}{"$schema": string(r)} + return json.Marshal(v) +} + +// UnmarshalJSON 
unmarshals this from JSON +func (r *SchemaURL) UnmarshalJSON(data []byte) error { + var v map[string]interface{} + if err := json.Unmarshal(data, &v); err != nil { + return err + } + return r.fromMap(v) +} + +func (r *SchemaURL) fromMap(v map[string]interface{}) error { + if v == nil { + return nil + } + if vv, ok := v["$schema"]; ok { + if str, ok := vv.(string); ok { + u, err := parseURL(str) + if err != nil { + return err + } + + *r = SchemaURL(u.String()) + } + } + return nil +} + +// SchemaProps describes a JSON schema (draft 4) +type SchemaProps struct { + ID string `json:"id,omitempty"` + Ref Ref `json:"-"` + Schema SchemaURL `json:"-"` + Description string `json:"description,omitempty"` + Type StringOrArray `json:"type,omitempty"` + Nullable bool `json:"nullable,omitempty"` + Format string `json:"format,omitempty"` + Title string `json:"title,omitempty"` + Default interface{} `json:"default,omitempty"` + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitempty"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` + MaxProperties *int64 `json:"maxProperties,omitempty"` + MinProperties *int64 `json:"minProperties,omitempty"` + Required []string `json:"required,omitempty"` + Items *SchemaOrArray `json:"items,omitempty"` + AllOf []Schema `json:"allOf,omitempty"` + OneOf []Schema `json:"oneOf,omitempty"` + AnyOf []Schema `json:"anyOf,omitempty"` + Not *Schema `json:"not,omitempty"` + Properties SchemaProperties `json:"properties,omitempty"` + AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitempty"` + PatternProperties SchemaProperties `json:"patternProperties,omitempty"` + Dependencies Dependencies `json:"dependencies,omitempty"` + AdditionalItems *SchemaOrBool `json:"additionalItems,omitempty"` + Definitions Definitions `json:"definitions,omitempty"` +} + +// SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4) +type SwaggerSchemaProps struct { + Discriminator string `json:"discriminator,omitempty"` + ReadOnly bool `json:"readOnly,omitempty"` + XML *XMLObject `json:"xml,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` + Example interface{} `json:"example,omitempty"` +} + +// Schema the schema object allows the definition of input and output data types. +// These types can be objects, but also primitives and arrays. +// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/) +// and uses a predefined subset of it. +// On top of this subset, there are extensions provided by this specification to allow for more complete documentation.
+// +// For more information: http://goo.gl/8us55a#schemaObject +type Schema struct { + VendorExtensible + SchemaProps + SwaggerSchemaProps + ExtraProps map[string]interface{} `json:"-"` +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s Schema) JSONLookup(token string) (interface{}, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + + if ex, ok := s.ExtraProps[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(s.SchemaProps, token) + if r != nil || (err != nil && !strings.HasPrefix(err.Error(), "object has no field")) { + return r, err + } + r, _, err = jsonpointer.GetForToken(s.SwaggerSchemaProps, token) + return r, err +} + +// WithID sets the id for this schema, allows for chaining +func (s *Schema) WithID(id string) *Schema { + s.ID = id + return s +} + +// WithTitle sets the title for this schema, allows for chaining +func (s *Schema) WithTitle(title string) *Schema { + s.Title = title + return s +} + +// WithDescription sets the description for this schema, allows for chaining +func (s *Schema) WithDescription(description string) *Schema { + s.Description = description + return s +} + +// WithProperties sets the properties for this schema +func (s *Schema) WithProperties(schemas map[string]Schema) *Schema { + s.Properties = schemas + return s +} + +// SetProperty sets a property on this schema +func (s *Schema) SetProperty(name string, schema Schema) *Schema { + if s.Properties == nil { + s.Properties = make(map[string]Schema) + } + s.Properties[name] = schema + return s +} + +// WithAllOf sets the all of property +func (s *Schema) WithAllOf(schemas ...Schema) *Schema { + s.AllOf = schemas + return s +} + +// WithMaxProperties sets the max number of properties an object can have +func (s *Schema) WithMaxProperties(max int64) *Schema { + s.MaxProperties = &max + return s +} + +// WithMinProperties sets the min number of properties an object must have +func (s *Schema) WithMinProperties(min int64) *Schema { + s.MinProperties = &min + return s +} + +// Typed sets the type of this schema for a single value item +func (s *Schema) Typed(tpe, format string) *Schema { + s.Type = []string{tpe} + s.Format = format + return s +} + +// AddType adds a type with potential format to the types for this schema +func (s *Schema) AddType(tpe, format string) *Schema { + s.Type = append(s.Type, tpe) + if format != "" { + s.Format = format + } + return s +} + +// AsNullable flags this schema as nullable. +func (s *Schema) AsNullable() *Schema { + s.Nullable = true + return s +} + +// CollectionOf a fluent builder method for an array parameter +func (s *Schema) CollectionOf(items Schema) *Schema { + s.Type = []string{jsonArray} + s.Items = &SchemaOrArray{Schema: &items} + return s +} + +// WithDefault sets the default value on this parameter +func (s *Schema) WithDefault(defaultValue interface{}) *Schema { + s.Default = defaultValue + return s +} + +// WithRequired flags this parameter as required +func (s *Schema) WithRequired(items ...string) *Schema { + s.Required = items + return s +} + +// AddRequired adds field names to the required properties array +func (s *Schema) AddRequired(items ...string) *Schema { + s.Required = append(s.Required, items...) 
+	return s
+}
+
+// WithMaxLength sets a max length value
+func (s *Schema) WithMaxLength(max int64) *Schema {
+	s.MaxLength = &max
+	return s
+}
+
+// WithMinLength sets a min length value
+func (s *Schema) WithMinLength(min int64) *Schema {
+	s.MinLength = &min
+	return s
+}
+
+// WithPattern sets a pattern value
+func (s *Schema) WithPattern(pattern string) *Schema {
+	s.Pattern = pattern
+	return s
+}
+
+// WithMultipleOf sets a multiple of value
+func (s *Schema) WithMultipleOf(number float64) *Schema {
+	s.MultipleOf = &number
+	return s
+}
+
+// WithMaximum sets a maximum number value
+func (s *Schema) WithMaximum(max float64, exclusive bool) *Schema {
+	s.Maximum = &max
+	s.ExclusiveMaximum = exclusive
+	return s
+}
+
+// WithMinimum sets a minimum number value
+func (s *Schema) WithMinimum(min float64, exclusive bool) *Schema {
+	s.Minimum = &min
+	s.ExclusiveMinimum = exclusive
+	return s
+}
+
+// WithEnum sets the enum values (replace)
+func (s *Schema) WithEnum(values ...interface{}) *Schema {
+	s.Enum = append([]interface{}{}, values...)
+	return s
+}
+
+// WithMaxItems sets the max items
+func (s *Schema) WithMaxItems(size int64) *Schema {
+	s.MaxItems = &size
+	return s
+}
+
+// WithMinItems sets the min items
+func (s *Schema) WithMinItems(size int64) *Schema {
+	s.MinItems = &size
+	return s
+}
+
+// UniqueValues dictates that this array can only have unique items
+func (s *Schema) UniqueValues() *Schema {
+	s.UniqueItems = true
+	return s
+}
+
+// AllowDuplicates this array can have duplicates
+func (s *Schema) AllowDuplicates() *Schema {
+	s.UniqueItems = false
+	return s
+}
+
+// AddToAllOf adds a schema to the allOf property
+func (s *Schema) AddToAllOf(schemas ...Schema) *Schema {
+	s.AllOf = append(s.AllOf, schemas...)
+	return s
+}
+
+// WithDiscriminator sets the name of the discriminator field
+func (s *Schema) WithDiscriminator(discriminator string) *Schema {
+	s.Discriminator = discriminator
+	return s
+}
+
+// AsReadOnly flags this schema as readonly
+func (s *Schema) AsReadOnly() *Schema {
+	s.ReadOnly = true
+	return s
+}
+
+// AsWritable flags this schema as writeable (not read-only)
+func (s *Schema) AsWritable() *Schema {
+	s.ReadOnly = false
+	return s
+}
+
+// WithExample sets the example for this schema
+func (s *Schema) WithExample(example interface{}) *Schema {
+	s.Example = example
+	return s
+}
+
+// WithExternalDocs sets/removes the external docs for/from this schema.
+// When you pass empty strings as params the external documents will be removed.
+// When you pass a non-empty string as one value, those values will be used on the external docs object.
+// So when you pass a non-empty description, you should also pass the url and vice versa.
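+//
+// Editor's sketch (not upstream) of both behaviours:
+//
+//	s.WithExternalDocs("More about pets", "https://example.test/pets") // sets description and url
+//	s.WithExternalDocs("", "")                                         // removes the external docs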
+func (s *Schema) WithExternalDocs(description, url string) *Schema {
+	if description == "" && url == "" {
+		s.ExternalDocs = nil
+		return s
+	}
+
+	if s.ExternalDocs == nil {
+		s.ExternalDocs = &ExternalDocumentation{}
+	}
+	s.ExternalDocs.Description = description
+	s.ExternalDocs.URL = url
+	return s
+}
+
+// WithXMLName sets the xml name for the object
+func (s *Schema) WithXMLName(name string) *Schema {
+	if s.XML == nil {
+		s.XML = new(XMLObject)
+	}
+	s.XML.Name = name
+	return s
+}
+
+// WithXMLNamespace sets the xml namespace for the object
+func (s *Schema) WithXMLNamespace(namespace string) *Schema {
+	if s.XML == nil {
+		s.XML = new(XMLObject)
+	}
+	s.XML.Namespace = namespace
+	return s
+}
+
+// WithXMLPrefix sets the xml prefix for the object
+func (s *Schema) WithXMLPrefix(prefix string) *Schema {
+	if s.XML == nil {
+		s.XML = new(XMLObject)
+	}
+	s.XML.Prefix = prefix
+	return s
+}
+
+// AsXMLAttribute flags this object as xml attribute
+func (s *Schema) AsXMLAttribute() *Schema {
+	if s.XML == nil {
+		s.XML = new(XMLObject)
+	}
+	s.XML.Attribute = true
+	return s
+}
+
+// AsXMLElement flags this object as an xml node
+func (s *Schema) AsXMLElement() *Schema {
+	if s.XML == nil {
+		s.XML = new(XMLObject)
+	}
+	s.XML.Attribute = false
+	return s
+}
+
+// AsWrappedXML flags this object as wrapped; this is mostly useful for array types
+func (s *Schema) AsWrappedXML() *Schema {
+	if s.XML == nil {
+		s.XML = new(XMLObject)
+	}
+	s.XML.Wrapped = true
+	return s
+}
+
+// AsUnwrappedXML flags this object as not wrapped
+func (s *Schema) AsUnwrappedXML() *Schema {
+	if s.XML == nil {
+		s.XML = new(XMLObject)
+	}
+	s.XML.Wrapped = false
+	return s
+}
+
+// SetValidations defines all schema validations.
+//
+// NOTE: Required, ReadOnly, AllOf, AnyOf, OneOf and Not are not considered.
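+//
+// Editor's illustration (not upstream): a typical round-trip with
+// Validations (defined below), which returns a clone of the current values:
+//
+//	v := s.Validations() // read the current validation properties
+//	v.UniqueItems = true
+//	s.SetValidations(v)  // write them back; Required, ReadOnly, etc. are left untouched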
+func (s *Schema) SetValidations(val SchemaValidations) {
+	s.Maximum = val.Maximum
+	s.ExclusiveMaximum = val.ExclusiveMaximum
+	s.Minimum = val.Minimum
+	s.ExclusiveMinimum = val.ExclusiveMinimum
+	s.MaxLength = val.MaxLength
+	s.MinLength = val.MinLength
+	s.Pattern = val.Pattern
+	s.MaxItems = val.MaxItems
+	s.MinItems = val.MinItems
+	s.UniqueItems = val.UniqueItems
+	s.MultipleOf = val.MultipleOf
+	s.Enum = val.Enum
+	s.MinProperties = val.MinProperties
+	s.MaxProperties = val.MaxProperties
+	s.PatternProperties = val.PatternProperties
+}
+
+// WithValidations is a fluent method to set schema validations
+func (s *Schema) WithValidations(val SchemaValidations) *Schema {
+	s.SetValidations(val)
+	return s
+}
+
+// Validations returns a clone of the validations for this schema
+func (s Schema) Validations() SchemaValidations {
+	return SchemaValidations{
+		CommonValidations: CommonValidations{
+			Maximum:          s.Maximum,
+			ExclusiveMaximum: s.ExclusiveMaximum,
+			Minimum:          s.Minimum,
+			ExclusiveMinimum: s.ExclusiveMinimum,
+			MaxLength:        s.MaxLength,
+			MinLength:        s.MinLength,
+			Pattern:          s.Pattern,
+			MaxItems:         s.MaxItems,
+			MinItems:         s.MinItems,
+			UniqueItems:      s.UniqueItems,
+			MultipleOf:       s.MultipleOf,
+			Enum:             s.Enum,
+		},
+		MinProperties:     s.MinProperties,
+		MaxProperties:     s.MaxProperties,
+		PatternProperties: s.PatternProperties,
+	}
+}
+
+// MarshalJSON marshal this to JSON
+func (s Schema) MarshalJSON() ([]byte, error) {
+	b1, err := json.Marshal(s.SchemaProps)
+	if err != nil {
+		return nil, fmt.Errorf("schema props %v", err)
+	}
+	b2, err := json.Marshal(s.VendorExtensible)
+	if err != nil {
+		return nil, fmt.Errorf("vendor props %v", err)
+	}
+	b3, err := s.Ref.MarshalJSON()
+	if err != nil {
+		return nil, fmt.Errorf("ref prop %v", err)
+	}
+	b4, err := s.Schema.MarshalJSON()
+	if err != nil {
+		return nil, fmt.Errorf("schema prop %v", err)
+	}
+	b5, err := json.Marshal(s.SwaggerSchemaProps)
+	if err != nil {
+		return nil, fmt.Errorf("common validations %v", err)
+	}
+	var b6 []byte
+	if s.ExtraProps != nil {
+		jj, err := json.Marshal(s.ExtraProps)
+		if err != nil {
+			return nil, fmt.Errorf("extra props %v", err)
+		}
+		b6 = jj
+	}
+	return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil
+}
+
+// UnmarshalJSON unmarshal this from JSON
+func (s *Schema) UnmarshalJSON(data []byte) error {
+	props := struct {
+		SchemaProps
+		SwaggerSchemaProps
+	}{}
+	if err := json.Unmarshal(data, &props); err != nil {
+		return err
+	}
+
+	sch := Schema{
+		SchemaProps:        props.SchemaProps,
+		SwaggerSchemaProps: props.SwaggerSchemaProps,
+	}
+
+	var d map[string]interface{}
+	if err := json.Unmarshal(data, &d); err != nil {
+		return err
+	}
+
+	_ = sch.Ref.fromMap(d)
+	_ = sch.Schema.fromMap(d)
+
+	delete(d, "$ref")
+	delete(d, "$schema")
+	for _, pn := range swag.DefaultJSONNameProvider.GetJSONNames(s) {
+		delete(d, pn)
+	}
+
+	for k, vv := range d {
+		lk := strings.ToLower(k)
+		if strings.HasPrefix(lk, "x-") {
+			if sch.Extensions == nil {
+				sch.Extensions = map[string]interface{}{}
+			}
+			sch.Extensions[k] = vv
+			continue
+		}
+		if sch.ExtraProps == nil {
+			sch.ExtraProps = map[string]interface{}{}
+		}
+		sch.ExtraProps[k] = vv
+	}
+
+	*s = sch
+
+	return nil
+}
diff --git a/vendor/github.com/go-openapi/spec/schema_loader.go b/vendor/github.com/go-openapi/spec/schema_loader.go
new file mode 100644
index 0000000000..0059b99aed
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/schema_loader.go
@@ -0,0 +1,331 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "log" + "net/url" + "reflect" + "strings" + + "github.com/go-openapi/swag" +) + +// PathLoader is a function to use when loading remote refs. +// +// This is a package level default. It may be overridden or bypassed by +// specifying the loader in ExpandOptions. +// +// NOTE: if you are using the go-openapi/loads package, it will override +// this value with its own default (a loader to retrieve YAML documents as +// well as JSON ones). +var PathLoader = func(pth string) (json.RawMessage, error) { + data, err := swag.LoadFromFileOrHTTP(pth) + if err != nil { + return nil, err + } + return json.RawMessage(data), nil +} + +// resolverContext allows to share a context during spec processing. +// At the moment, it just holds the index of circular references found. +type resolverContext struct { + // circulars holds all visited circular references, to shortcircuit $ref resolution. + // + // This structure is privately instantiated and needs not be locked against + // concurrent access, unless we chose to implement a parallel spec walking. + circulars map[string]bool + basePath string + loadDoc func(string) (json.RawMessage, error) + rootID string +} + +func newResolverContext(options *ExpandOptions) *resolverContext { + expandOptions := optionsOrDefault(options) + + // path loader may be overridden by options + var loader func(string) (json.RawMessage, error) + if expandOptions.PathLoader == nil { + loader = PathLoader + } else { + loader = expandOptions.PathLoader + } + + return &resolverContext{ + circulars: make(map[string]bool), + basePath: expandOptions.RelativeBase, // keep the root base path in context + loadDoc: loader, + } +} + +type schemaLoader struct { + root interface{} + options *ExpandOptions + cache ResolutionCache + context *resolverContext +} + +func (r *schemaLoader) transitiveResolver(basePath string, ref Ref) *schemaLoader { + if ref.IsRoot() || ref.HasFragmentOnly { + return r + } + + baseRef := MustCreateRef(basePath) + currentRef := normalizeRef(&ref, basePath) + if strings.HasPrefix(currentRef.String(), baseRef.String()) { + return r + } + + // set a new root against which to resolve + rootURL := currentRef.GetURL() + rootURL.Fragment = "" + root, _ := r.cache.Get(rootURL.String()) + + // shallow copy of resolver options to set a new RelativeBase when + // traversing multiple documents + newOptions := r.options + newOptions.RelativeBase = rootURL.String() + + return defaultSchemaLoader(root, newOptions, r.cache, r.context) +} + +func (r *schemaLoader) updateBasePath(transitive *schemaLoader, basePath string) string { + if transitive != r { + if transitive.options != nil && transitive.options.RelativeBase != "" { + return normalizeBase(transitive.options.RelativeBase) + } + } + + return basePath +} + +func (r *schemaLoader) resolveRef(ref *Ref, target interface{}, basePath string) error { + tgt := reflect.ValueOf(target) + if tgt.Kind() != reflect.Ptr { + return ErrResolveRefNeedsAPointer + } + + if ref.GetURL() == nil 
+		return nil
+	}
+
+	var (
+		res  interface{}
+		data interface{}
+		err  error
+	)
+
+	// Resolve against the root if it isn't nil, and if ref is pointing at the root, or has a fragment only, which means
+	// it is pointing somewhere in the root.
+	root := r.root
+	if (ref.IsRoot() || ref.HasFragmentOnly) && root == nil && basePath != "" {
+		if baseRef, erb := NewRef(basePath); erb == nil {
+			root, _, _, _ = r.load(baseRef.GetURL())
+		}
+	}
+
+	if (ref.IsRoot() || ref.HasFragmentOnly) && root != nil {
+		data = root
+	} else {
+		baseRef := normalizeRef(ref, basePath)
+		data, _, _, err = r.load(baseRef.GetURL())
+		if err != nil {
+			return err
+		}
+	}
+
+	res = data
+	if ref.String() != "" {
+		res, _, err = ref.GetPointer().Get(data)
+		if err != nil {
+			return err
+		}
+	}
+	return swag.DynamicJSONToStruct(res, target)
+}
+
+func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) {
+	debugLog("loading schema from url: %s", refURL)
+	toFetch := *refURL
+	toFetch.Fragment = ""
+
+	var err error
+	pth := toFetch.String()
+	normalized := normalizeBase(pth)
+	debugLog("loading doc from: %s", normalized)
+
+	data, fromCache := r.cache.Get(normalized)
+	if fromCache {
+		return data, toFetch, fromCache, nil
+	}
+
+	b, err := r.context.loadDoc(normalized)
+	if err != nil {
+		return nil, url.URL{}, false, err
+	}
+
+	var doc interface{}
+	if err := json.Unmarshal(b, &doc); err != nil {
+		return nil, url.URL{}, false, err
+	}
+	r.cache.Set(normalized, doc)
+
+	return doc, toFetch, fromCache, nil
+}
+
+// isCircular detects cycles in sequences of $ref.
+//
+// It relies on a private context (which need not be locked).
+func (r *schemaLoader) isCircular(ref *Ref, basePath string, parentRefs ...string) (foundCycle bool) {
+	normalizedRef := normalizeURI(ref.String(), basePath)
+	if _, ok := r.context.circulars[normalizedRef]; ok {
+		// circular $ref has already been detected in another explored cycle
+		foundCycle = true
+		return
+	}
+	foundCycle = swag.ContainsStrings(parentRefs, normalizedRef) // normalized windows URLs are lower cased
+	if foundCycle {
+		r.context.circulars[normalizedRef] = true
+	}
+	return
+}
+
+// Resolve resolves a reference against basePath and stores the result in target.
+//
+// Resolve is not in charge of following references: it only resolves ref by following its URL.
+//
+// If the schema the ref is referring to holds nested refs, Resolve doesn't resolve them.
+//
+// If basePath is an empty string, ref is resolved against the root schema stored in the schemaLoader struct.
+func (r *schemaLoader) Resolve(ref *Ref, target interface{}, basePath string) error {
+	return r.resolveRef(ref, target, basePath)
+}
+
+func (r *schemaLoader) deref(input interface{}, parentRefs []string, basePath string) error {
+	var ref *Ref
+	switch refable := input.(type) {
+	case *Schema:
+		ref = &refable.Ref
+	case *Parameter:
+		ref = &refable.Ref
+	case *Response:
+		ref = &refable.Ref
+	case *PathItem:
+		ref = &refable.Ref
+	default:
+		return fmt.Errorf("unsupported type: %T: %w", input, ErrDerefUnsupportedType)
+	}
+
+	curRef := ref.String()
+	if curRef == "" {
+		return nil
+	}
+
+	normalizedRef := normalizeRef(ref, basePath)
+	normalizedBasePath := normalizedRef.RemoteURI()
+
+	if r.isCircular(normalizedRef, basePath, parentRefs...) {
+		return nil
+	}
+
+	if err := r.resolveRef(ref, input, basePath); r.shouldStopOnError(err) {
+		return err
+	}
+
+	if ref.String() == "" || ref.String() == curRef {
+		// done with dereferencing
+		return nil
+	}
+
+	parentRefs = append(parentRefs, normalizedRef.String())
+	return r.deref(input, parentRefs, normalizedBasePath)
+}
+
+func (r *schemaLoader) shouldStopOnError(err error) bool {
+	if err != nil && !r.options.ContinueOnError {
+		return true
+	}
+
+	if err != nil {
+		log.Println(err)
+	}
+
+	return false
+}
+
+func (r *schemaLoader) setSchemaID(target interface{}, id, basePath string) (string, string) {
+	debugLog("schema has ID: %s", id)
+
+	// handling the case when id is a folder
+	// remember that basePath has to point to a file
+	var refPath string
+	if strings.HasSuffix(id, "/") {
+		// ensure this is detected as a file, not a folder
+		refPath = fmt.Sprintf("%s%s", id, "placeholder.json")
+	} else {
+		refPath = id
+	}
+
+	// updates the current base path
+	// * important: ID can be a relative path
+	// * registers target to be fetchable from the new base proposed by this id
+	newBasePath := normalizeURI(refPath, basePath)
+
+	// store found IDs for possible future reuse in $ref
+	r.cache.Set(newBasePath, target)
+
+	// the root document has an ID: all $ref relative to that ID may
+	// be rebased relative to the root document
+	if basePath == r.context.basePath {
+		debugLog("root document is a schema with ID: %s (normalized as: %s)", id, newBasePath)
+		r.context.rootID = newBasePath
+	}
+
+	return newBasePath, refPath
+}
+
+func defaultSchemaLoader(
+	root interface{},
+	expandOptions *ExpandOptions,
+	cache ResolutionCache,
+	context *resolverContext) *schemaLoader {
+
+	if expandOptions == nil {
+		expandOptions = &ExpandOptions{}
+	}
+
+	cache = cacheOrDefault(cache)
+
+	if expandOptions.RelativeBase == "" {
+		// if no relative base is provided, assume the root document
+		// contains all $ref, or at least, that the relative documents
+		// may be resolved from the current working directory.
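+		// (editor's note: baseForRoot, defined elsewhere in this package, appears
+		// to register the in-memory root document in the resolution cache under a
+		// synthetic base path, so that fragment-only $ref's can still be resolved)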
+ expandOptions.RelativeBase = baseForRoot(root, cache) + } + debugLog("effective expander options: %#v", expandOptions) + + if context == nil { + context = newResolverContext(expandOptions) + } + + return &schemaLoader{ + root: root, + options: expandOptions, + cache: cache, + context: context, + } +} diff --git a/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json b/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json new file mode 100644 index 0000000000..bcbb84743e --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json @@ -0,0 +1,149 @@ +{ + "id": "http://json-schema.org/draft-04/schema#", + "$schema": "http://json-schema.org/draft-04/schema#", + "description": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "positiveInteger": { + "type": "integer", + "minimum": 0 + }, + "positiveIntegerDefault0": { + "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] + }, + "simpleTypes": { + "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "minItems": 1, + "uniqueItems": true + } + }, + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "$schema": { + "type": "string" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "minimum": 0, + "exclusiveMinimum": true + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "boolean", + "default": false + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "boolean", + "default": false + }, + "maxLength": { "$ref": "#/definitions/positiveInteger" }, + "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/positiveInteger" }, + "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxProperties": { "$ref": "#/definitions/positiveInteger" }, + "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, 
+ "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "dependencies": { + "exclusiveMaximum": [ "maximum" ], + "exclusiveMinimum": [ "minimum" ] + }, + "default": {} +} diff --git a/vendor/github.com/go-openapi/spec/schemas/v2/schema.json b/vendor/github.com/go-openapi/spec/schemas/v2/schema.json new file mode 100644 index 0000000000..ebe10ed32d --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schemas/v2/schema.json @@ -0,0 +1,1607 @@ +{ + "title": "A JSON Schema for Swagger 2.0 API.", + "id": "http://swagger.io/v2/schema.json#", + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "required": [ + "swagger", + "info", + "paths" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "swagger": { + "type": "string", + "enum": [ + "2.0" + ], + "description": "The Swagger version of this document." + }, + "info": { + "$ref": "#/definitions/info" + }, + "host": { + "type": "string", + "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$", + "description": "The host (name or ip) of the API. Example: 'swagger.io'" + }, + "basePath": { + "type": "string", + "pattern": "^/", + "description": "The base path to the API. Example: '/api'." + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "consumes": { + "description": "A list of MIME types accepted by the API.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "paths": { + "$ref": "#/definitions/paths" + }, + "definitions": { + "$ref": "#/definitions/definitions" + }, + "parameters": { + "$ref": "#/definitions/parameterDefinitions" + }, + "responses": { + "$ref": "#/definitions/responseDefinitions" + }, + "security": { + "$ref": "#/definitions/security" + }, + "securityDefinitions": { + "$ref": "#/definitions/securityDefinitions" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." + }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed." + }, + "termsOfService": { + "type": "string", + "description": "The terms of service for the API." + }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." 
+ }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." + }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "paths": { + "type": "object", + "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + }, + "^/": { + "$ref": "#/definitions/pathItem" + } + }, + "additionalProperties": false + }, + "definitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "One or more JSON objects describing the schemas being consumed and produced by the API." + }, + "parameterDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + }, + "description": "One or more JSON representations for parameters" + }, + "responseDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/response" + }, + "description": "One or more JSON representations for responses" + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "examples": { + "type": "object", + "additionalProperties": true + }, + "mimeType": { + "type": "string", + "description": "The MIME type of the HTTP message." + }, + "operation": { + "type": "object", + "required": [ + "responses" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the operation." + }, + "description": { + "type": "string", + "description": "A longer description of the operation, GitHub Flavored Markdown is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string", + "description": "A unique identifier of the operation." 
+ }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "consumes": { + "description": "A list of MIME types the API can consume.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "parameters": { + "$ref": "#/definitions/parametersList" + }, + "responses": { + "$ref": "#/definitions/responses" + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "security": { + "$ref": "#/definitions/security" + } + } + }, + "pathItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "get": { + "$ref": "#/definitions/operation" + }, + "put": { + "$ref": "#/definitions/operation" + }, + "post": { + "$ref": "#/definitions/operation" + }, + "delete": { + "$ref": "#/definitions/operation" + }, + "options": { + "$ref": "#/definitions/operation" + }, + "head": { + "$ref": "#/definitions/operation" + }, + "patch": { + "$ref": "#/definitions/operation" + }, + "parameters": { + "$ref": "#/definitions/parametersList" + } + } + }, + "responses": { + "type": "object", + "description": "Response objects names can either be any valid HTTP status code or 'default'.", + "minProperties": 1, + "additionalProperties": false, + "patternProperties": { + "^([0-9]{3})$|^(default)$": { + "$ref": "#/definitions/responseValue" + }, + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "not": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + } + }, + "responseValue": { + "oneOf": [ + { + "$ref": "#/definitions/response" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "response": { + "type": "object", + "required": [ + "description" + ], + "properties": { + "description": { + "type": "string" + }, + "schema": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "$ref": "#/definitions/fileSchema" + } + ] + }, + "headers": { + "$ref": "#/definitions/headers" + }, + "examples": { + "$ref": "#/definitions/examples" + } + }, + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "headers": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/header" + } + }, + "header": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": 
"#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "vendorExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "bodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "schema" + ], + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "body" + ] + }, + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "schema": { + "$ref": "#/definitions/schema" + } + }, + "additionalProperties": false + }, + "headerParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "header" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "queryParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "query" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. 
This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "formDataParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "formData" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array", + "file" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "pathParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "required" + ], + "properties": { + "required": { + "type": "boolean", + "enum": [ + true + ], + "description": "Determines whether or not this parameter is required or optional." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "path" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "nonBodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "type" + ], + "oneOf": [ + { + "$ref": "#/definitions/headerParameterSubSchema" + }, + { + "$ref": "#/definitions/formDataParameterSubSchema" + }, + { + "$ref": "#/definitions/queryParameterSubSchema" + }, + { + "$ref": "#/definitions/pathParameterSubSchema" + } + ] + }, + "parameter": { + "oneOf": [ + { + "$ref": "#/definitions/bodyParameter" + }, + { + "$ref": "#/definitions/nonBodyParameter" + } + ] + }, + "schema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "maxProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "enum": { + "$ref": 
"http://json-schema.org/draft-04/schema#/properties/enum" + }, + "additionalProperties": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "type": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/type" + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "discriminator": { + "type": "string" + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "xml": { + "$ref": "#/definitions/xml" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "fileSchema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "type" + ], + "properties": { + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "type": { + "type": "string", + "enum": [ + "file" + ] + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "primitivesItems": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/securityRequirement" + }, + "uniqueItems": true + }, + "securityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + }, + "xml": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + 
"prefix": { + "type": "string" + }, + "attribute": { + "type": "boolean", + "default": false + }, + "wrapped": { + "type": "boolean", + "default": false + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "securityDefinitions": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/basicAuthenticationSecurity" + }, + { + "$ref": "#/definitions/apiKeySecurity" + }, + { + "$ref": "#/definitions/oauth2ImplicitSecurity" + }, + { + "$ref": "#/definitions/oauth2PasswordSecurity" + }, + { + "$ref": "#/definitions/oauth2ApplicationSecurity" + }, + { + "$ref": "#/definitions/oauth2AccessCodeSecurity" + } + ] + } + }, + "basicAuthenticationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "basic" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "apiKeySecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ImplicitSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "implicit" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2PasswordSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "password" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ApplicationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "application" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2AccessCodeSecurity": { + "type": "object", + 
"additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "accessCode" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2Scopes": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "mediaTypeList": { + "type": "array", + "items": { + "$ref": "#/definitions/mimeType" + }, + "uniqueItems": true + }, + "parametersList": { + "type": "array", + "description": "The parameters needed to send a valid API call.", + "additionalItems": false, + "items": { + "oneOf": [ + { + "$ref": "#/definitions/parameter" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "uniqueItems": true + }, + "schemesList": { + "type": "array", + "description": "The transfer protocol of the API.", + "items": { + "type": "string", + "enum": [ + "http", + "https", + "ws", + "wss" + ] + }, + "uniqueItems": true + }, + "collectionFormat": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes" + ], + "default": "csv" + }, + "collectionFormatWithMulti": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes", + "multi" + ], + "default": "csv" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + }, + "jsonReference": { + "type": "object", + "required": [ + "$ref" + ], + "additionalProperties": false, + "properties": { + "$ref": { + "type": "string" + } + } + } + } +} diff --git a/vendor/github.com/go-openapi/spec/security_scheme.go b/vendor/github.com/go-openapi/spec/security_scheme.go new file mode 100644 index 0000000000..9d0bdae908 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/security_scheme.go @@ -0,0 +1,170 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, 
Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +const ( + basic = "basic" + apiKey = "apiKey" + oauth2 = "oauth2" + implicit = "implicit" + password = "password" + application = "application" + accessCode = "accessCode" +) + +// BasicAuth creates a basic auth security scheme +func BasicAuth() *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: basic}} +} + +// APIKeyAuth creates an api key auth security scheme +func APIKeyAuth(fieldName, valueSource string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: apiKey, Name: fieldName, In: valueSource}} +} + +// OAuth2Implicit creates an implicit flow oauth2 security scheme +func OAuth2Implicit(authorizationURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: implicit, + AuthorizationURL: authorizationURL, + }} +} + +// OAuth2Password creates a password flow oauth2 security scheme +func OAuth2Password(tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: password, + TokenURL: tokenURL, + }} +} + +// OAuth2Application creates an application flow oauth2 security scheme +func OAuth2Application(tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: application, + TokenURL: tokenURL, + }} +} + +// OAuth2AccessToken creates an access token flow oauth2 security scheme +func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: accessCode, + AuthorizationURL: authorizationURL, + TokenURL: tokenURL, + }} +} + +// SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section +type SecuritySchemeProps struct { + Description string `json:"description,omitempty"` + Type string `json:"type"` + Name string `json:"name,omitempty"` // api key + In string `json:"in,omitempty"` // api key + Flow string `json:"flow,omitempty"` // oauth2 + AuthorizationURL string `json:"authorizationUrl"` // oauth2 + TokenURL string `json:"tokenUrl,omitempty"` // oauth2 + Scopes map[string]string `json:"scopes,omitempty"` // oauth2 +} + +// AddScope adds a scope to this security scheme +func (s *SecuritySchemeProps) AddScope(scope, description string) { + if s.Scopes == nil { + s.Scopes = make(map[string]string) + } + s.Scopes[scope] = description +} + +// SecurityScheme allows the definition of a security scheme that can be used by the operations. +// Supported schemes are basic authentication, an API key (either as a header or as a query parameter) +// and OAuth2's common flows (implicit, password, application and access code). 
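+//
+// Editor's illustration (not part of the upstream file): the constructors
+// above build these schemes directly, e.g.
+//
+//	scheme := OAuth2AccessToken("https://auth.example.test/authorize", "https://auth.example.test/token")
+//	scheme.AddScope("read", "grants read access")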
+//
+// For more information: http://goo.gl/8us55a#securitySchemeObject
+type SecurityScheme struct {
+	VendorExtensible
+	SecuritySchemeProps
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (s SecurityScheme) JSONLookup(token string) (interface{}, error) {
+	if ex, ok := s.Extensions[token]; ok {
+		return &ex, nil
+	}
+
+	r, _, err := jsonpointer.GetForToken(s.SecuritySchemeProps, token)
+	return r, err
+}
+
+// MarshalJSON marshal this to JSON
+func (s SecurityScheme) MarshalJSON() ([]byte, error) {
+	var (
+		b1  []byte
+		err error
+	)
+
+	if s.Type == oauth2 && (s.Flow == "implicit" || s.Flow == "accessCode") {
+		// for the oauth2 implicit and accessCode flows, an empty AuthorizationURL is marshalled as an empty string
+		b1, err = json.Marshal(s.SecuritySchemeProps)
+	} else {
+		// otherwise, an empty AuthorizationURL is omitted
+		b1, err = json.Marshal(struct {
+			Description      string            `json:"description,omitempty"`
+			Type             string            `json:"type"`
+			Name             string            `json:"name,omitempty"`             // api key
+			In               string            `json:"in,omitempty"`               // api key
+			Flow             string            `json:"flow,omitempty"`             // oauth2
+			AuthorizationURL string            `json:"authorizationUrl,omitempty"` // oauth2
+			TokenURL         string            `json:"tokenUrl,omitempty"`         // oauth2
+			Scopes           map[string]string `json:"scopes,omitempty"`           // oauth2
+		}{
+			Description:      s.Description,
+			Type:             s.Type,
+			Name:             s.Name,
+			In:               s.In,
+			Flow:             s.Flow,
+			AuthorizationURL: s.AuthorizationURL,
+			TokenURL:         s.TokenURL,
+			Scopes:           s.Scopes,
+		})
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	b2, err := json.Marshal(s.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+	return swag.ConcatJSON(b1, b2), nil
+}
+
+// UnmarshalJSON unmarshal this from JSON
+func (s *SecurityScheme) UnmarshalJSON(data []byte) error {
+	if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil {
+		return err
+	}
+	return json.Unmarshal(data, &s.VendorExtensible)
+}
diff --git a/vendor/github.com/go-openapi/spec/spec.go b/vendor/github.com/go-openapi/spec/spec.go
new file mode 100644
index 0000000000..876aa12759
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/spec.go
@@ -0,0 +1,78 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"encoding/json"
+)
+
+//go:generate curl -L --progress -o ./schemas/v2/schema.json http://swagger.io/v2/schema.json
+//go:generate curl -L --progress -o ./schemas/jsonschema-draft-04.json http://json-schema.org/draft-04/schema
+//go:generate go-bindata -pkg=spec -prefix=./schemas -ignore=.*\.md ./schemas/...
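+// (editor's note: the curl directives above fetch the official schemas, go-bindata
+// embeds them into the package, and the perl pass below renames the generated
+// Json* identifiers to JSON*)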
+//go:generate perl -pi -e s,Json,JSON,g bindata.go
+
+const (
+	// SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs
+	SwaggerSchemaURL = "http://swagger.io/v2/schema.json#"
+	// JSONSchemaURL the url for the json schema
+	JSONSchemaURL = "http://json-schema.org/draft-04/schema#"
+)
+
+// MustLoadJSONSchemaDraft04 panics when JSONSchemaDraft04 returns an error
+func MustLoadJSONSchemaDraft04() *Schema {
+	d, e := JSONSchemaDraft04()
+	if e != nil {
+		panic(e)
+	}
+	return d
+}
+
+// JSONSchemaDraft04 loads the json schema document for JSON Schema draft 04
+func JSONSchemaDraft04() (*Schema, error) {
+	b, err := jsonschemaDraft04JSONBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	schema := new(Schema)
+	if err := json.Unmarshal(b, schema); err != nil {
+		return nil, err
+	}
+	return schema, nil
+}
+
+// MustLoadSwagger20Schema panics when Swagger20Schema returns an error
+func MustLoadSwagger20Schema() *Schema {
+	d, e := Swagger20Schema()
+	if e != nil {
+		panic(e)
+	}
+	return d
+}
+
+// Swagger20Schema loads the swagger 2.0 schema from the embedded assets
+func Swagger20Schema() (*Schema, error) {
+	b, err := v2SchemaJSONBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	schema := new(Schema)
+	if err := json.Unmarshal(b, schema); err != nil {
+		return nil, err
+	}
+	return schema, nil
+}
diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go
new file mode 100644
index 0000000000..1590fd1751
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/swagger.go
@@ -0,0 +1,448 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"bytes"
+	"encoding/gob"
+	"encoding/json"
+	"fmt"
+	"strconv"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag"
+)
+
+// Swagger this is the root document object for the API specification.
+// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier)
+// together into one document.
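+//
+// A document can be decoded with the standard encoding/json package
+// (an illustrative sketch; specBytes is assumed to hold a swagger 2.0 JSON document):
+//
+//	var doc Swagger
+//	if err := json.Unmarshal(specBytes, &doc); err != nil {
+//		// handle malformed specification
+//	}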
+// +// For more information: http://goo.gl/8us55a#swagger-object- +type Swagger struct { + VendorExtensible + SwaggerProps +} + +// JSONLookup look up a value by the json property name +func (s Swagger) JSONLookup(token string) (interface{}, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(s.SwaggerProps, token) + return r, err +} + +// MarshalJSON marshals this swagger structure to json +func (s Swagger) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(s.SwaggerProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON unmarshals a swagger spec from json +func (s *Swagger) UnmarshalJSON(data []byte) error { + var sw Swagger + if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil { + return err + } + if err := json.Unmarshal(data, &sw.VendorExtensible); err != nil { + return err + } + *s = sw + return nil +} + +// GobEncode provides a safe gob encoder for Swagger, including extensions +func (s Swagger) GobEncode() ([]byte, error) { + var b bytes.Buffer + raw := struct { + Props SwaggerProps + Ext VendorExtensible + }{ + Props: s.SwaggerProps, + Ext: s.VendorExtensible, + } + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Swagger, including extensions +func (s *Swagger) GobDecode(b []byte) error { + var raw struct { + Props SwaggerProps + Ext VendorExtensible + } + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + s.SwaggerProps = raw.Props + s.VendorExtensible = raw.Ext + return nil +} + +// SwaggerProps captures the top-level properties of an Api specification +// +// NOTE: validation rules +// - the scheme, when present must be from [http, https, ws, wss] +// - BasePath must start with a leading "/" +// - Paths is required +type SwaggerProps struct { + ID string `json:"id,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` + Swagger string `json:"swagger,omitempty"` + Info *Info `json:"info,omitempty"` + Host string `json:"host,omitempty"` + BasePath string `json:"basePath,omitempty"` + Paths *Paths `json:"paths"` + Definitions Definitions `json:"definitions,omitempty"` + Parameters map[string]Parameter `json:"parameters,omitempty"` + Responses map[string]Response `json:"responses,omitempty"` + SecurityDefinitions SecurityDefinitions `json:"securityDefinitions,omitempty"` + Security []map[string][]string `json:"security,omitempty"` + Tags []Tag `json:"tags,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` +} + +type swaggerPropsAlias SwaggerProps + +type gobSwaggerPropsAlias struct { + Security []map[string]struct { + List []string + Pad bool + } + Alias *swaggerPropsAlias + SecurityIsEmpty bool +} + +// GobEncode provides a safe gob encoder for SwaggerProps, including empty security requirements +func (o SwaggerProps) GobEncode() ([]byte, error) { + raw := gobSwaggerPropsAlias{ + Alias: (*swaggerPropsAlias)(&o), + } + + var b bytes.Buffer + if o.Security == nil { + // nil security requirement + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + if len(o.Security) == 0 { + // empty, but non-nil security requirement + raw.SecurityIsEmpty = true + raw.Alias.Security = nil + err := gob.NewEncoder(&b).Encode(raw) 
+ return b.Bytes(), err + } + + raw.Security = make([]map[string]struct { + List []string + Pad bool + }, 0, len(o.Security)) + for _, req := range o.Security { + v := make(map[string]struct { + List []string + Pad bool + }, len(req)) + for k, val := range req { + v[k] = struct { + List []string + Pad bool + }{ + List: val, + } + } + raw.Security = append(raw.Security, v) + } + + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for SwaggerProps, including empty security requirements +func (o *SwaggerProps) GobDecode(b []byte) error { + var raw gobSwaggerPropsAlias + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + if raw.Alias == nil { + return nil + } + + switch { + case raw.SecurityIsEmpty: + // empty, but non-nil security requirement + raw.Alias.Security = []map[string][]string{} + case len(raw.Alias.Security) == 0: + // nil security requirement + raw.Alias.Security = nil + default: + raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) + for _, req := range raw.Security { + v := make(map[string][]string, len(req)) + for k, val := range req { + v[k] = make([]string, 0, len(val.List)) + v[k] = append(v[k], val.List...) + } + raw.Alias.Security = append(raw.Alias.Security, v) + } + } + + *o = *(*SwaggerProps)(raw.Alias) + return nil +} + +// Dependencies represent a dependencies property +type Dependencies map[string]SchemaOrStringArray + +// SchemaOrBool represents a schema or boolean value, is biased towards true for the boolean property +type SchemaOrBool struct { + Allows bool + Schema *Schema +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrBool) JSONLookup(token string) (interface{}, error) { + if token == "allows" { + return s.Allows, nil + } + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +var jsTrue = []byte("true") +var jsFalse = []byte("false") + +// MarshalJSON convert this object to JSON +func (s SchemaOrBool) MarshalJSON() ([]byte, error) { + if s.Schema != nil { + return json.Marshal(s.Schema) + } + + if s.Schema == nil && !s.Allows { + return jsFalse, nil + } + return jsTrue, nil +} + +// UnmarshalJSON converts this bool or schema object from a JSON structure +func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { + var nw SchemaOrBool + if len(data) > 0 { + if data[0] == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + nw.Allows = !bytes.Equal(data, []byte("false")) + } + *s = nw + return nil +} + +// SchemaOrStringArray represents a schema or a string array +type SchemaOrStringArray struct { + Schema *Schema + Property []string +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrStringArray) JSONLookup(token string) (interface{}, error) { + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { + if len(s.Property) > 0 { + return json.Marshal(s.Property) + } + if s.Schema != nil { + return json.Marshal(s.Schema) + } + return []byte("null"), nil +} + +// UnmarshalJSON converts this schema object or array from a JSON structure +func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { + var first byte + if len(data) > 1 { + first = data[0] + } + var nw SchemaOrStringArray + if first == '{' { + 
var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Property); err != nil { + return err + } + } + *s = nw + return nil +} + +// Definitions contains the models explicitly defined in this spec +// An object to hold data types that can be consumed and produced by operations. +// These data types can be primitives, arrays or models. +// +// For more information: http://goo.gl/8us55a#definitionsObject +type Definitions map[string]Schema + +// SecurityDefinitions a declaration of the security schemes available to be used in the specification. +// This does not enforce the security schemes on the operations and only serves to provide +// the relevant details for each scheme. +// +// For more information: http://goo.gl/8us55a#securityDefinitionsObject +type SecurityDefinitions map[string]*SecurityScheme + +// StringOrArray represents a value that can either be a string +// or an array of strings. Mainly here for serialization purposes +type StringOrArray []string + +// Contains returns true when the value is contained in the slice +func (s StringOrArray) Contains(value string) bool { + for _, str := range s { + if str == value { + return true + } + } + return false +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrArray) JSONLookup(token string) (interface{}, error) { + if _, err := strconv.Atoi(token); err == nil { + r, _, err := jsonpointer.GetForToken(s.Schemas, token) + return r, err + } + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string +func (s *StringOrArray) UnmarshalJSON(data []byte) error { + var first byte + if len(data) > 1 { + first = data[0] + } + + if first == '[' { + var parsed []string + if err := json.Unmarshal(data, &parsed); err != nil { + return err + } + *s = StringOrArray(parsed) + return nil + } + + var single interface{} + if err := json.Unmarshal(data, &single); err != nil { + return err + } + if single == nil { + return nil + } + switch v := single.(type) { + case string: + *s = StringOrArray([]string{v}) + return nil + default: + return fmt.Errorf("only string or array is allowed, not %T", single) + } +} + +// MarshalJSON converts this string or array to a JSON array or JSON string +func (s StringOrArray) MarshalJSON() ([]byte, error) { + if len(s) == 1 { + return json.Marshal([]string(s)[0]) + } + return json.Marshal([]string(s)) +} + +// SchemaOrArray represents a value that can either be a Schema +// or an array of Schema. 
Mainly here for serialization purposes +type SchemaOrArray struct { + Schema *Schema + Schemas []Schema +} + +// Len returns the number of schemas in this property +func (s SchemaOrArray) Len() int { + if s.Schema != nil { + return 1 + } + return len(s.Schemas) +} + +// ContainsType returns true when one of the schemas is of the specified type +func (s *SchemaOrArray) ContainsType(name string) bool { + if s.Schema != nil { + return s.Schema.Type != nil && s.Schema.Type.Contains(name) + } + return false +} + +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrArray) MarshalJSON() ([]byte, error) { + if len(s.Schemas) > 0 { + return json.Marshal(s.Schemas) + } + return json.Marshal(s.Schema) +} + +// UnmarshalJSON converts this schema object or array from a JSON structure +func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { + var nw SchemaOrArray + var first byte + if len(data) > 1 { + first = data[0] + } + if first == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Schemas); err != nil { + return err + } + } + *s = nw + return nil +} + +// vim:set ft=go noet sts=2 sw=2 ts=2: diff --git a/vendor/github.com/go-openapi/spec/tag.go b/vendor/github.com/go-openapi/spec/tag.go new file mode 100644 index 0000000000..faa3d3de1e --- /dev/null +++ b/vendor/github.com/go-openapi/spec/tag.go @@ -0,0 +1,75 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// TagProps describe a tag entry in the top level tags section of a swagger spec +type TagProps struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` +} + +// NewTag creates a new tag +func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag { + return Tag{TagProps: TagProps{Description: description, Name: name, ExternalDocs: externalDocs}} +} + +// Tag allows adding meta data to a single tag that is used by the +// [Operation Object](http://goo.gl/8us55a#operationObject). +// It is not mandatory to have a Tag Object per tag used there. 
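+//
+// For example (illustrative; the values shown are not part of the upstream source):
+//
+//	tag := NewTag("store", "Access to store orders", nil)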
+//
+// For more information: http://goo.gl/8us55a#tagObject
+type Tag struct {
+	VendorExtensible
+	TagProps
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (t Tag) JSONLookup(token string) (interface{}, error) {
+	if ex, ok := t.Extensions[token]; ok {
+		return &ex, nil
+	}
+
+	r, _, err := jsonpointer.GetForToken(t.TagProps, token)
+	return r, err
+}
+
+// MarshalJSON marshals this to JSON
+func (t Tag) MarshalJSON() ([]byte, error) {
+	b1, err := json.Marshal(t.TagProps)
+	if err != nil {
+		return nil, err
+	}
+	b2, err := json.Marshal(t.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+	return swag.ConcatJSON(b1, b2), nil
+}
+
+// UnmarshalJSON unmarshals this from JSON
+func (t *Tag) UnmarshalJSON(data []byte) error {
+	if err := json.Unmarshal(data, &t.TagProps); err != nil {
+		return err
+	}
+	return json.Unmarshal(data, &t.VendorExtensible)
+}
diff --git a/vendor/github.com/go-openapi/spec/url_go19.go b/vendor/github.com/go-openapi/spec/url_go19.go
new file mode 100644
index 0000000000..5bdfe40bcc
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/url_go19.go
@@ -0,0 +1,11 @@
+package spec
+
+import "net/url"
+
+func parseURL(s string) (*url.URL, error) {
+	u, err := url.Parse(s)
+	if err == nil {
+		u.OmitHost = false
+	}
+	return u, err
+}
diff --git a/vendor/github.com/go-openapi/spec/validations.go b/vendor/github.com/go-openapi/spec/validations.go
new file mode 100644
index 0000000000..6360a8ea77
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/validations.go
@@ -0,0 +1,215 @@
+package spec
+
+// CommonValidations describes common JSON-schema validations
+type CommonValidations struct {
+	Maximum          *float64      `json:"maximum,omitempty"`
+	ExclusiveMaximum bool          `json:"exclusiveMaximum,omitempty"`
+	Minimum          *float64      `json:"minimum,omitempty"`
+	ExclusiveMinimum bool          `json:"exclusiveMinimum,omitempty"`
+	MaxLength        *int64        `json:"maxLength,omitempty"`
+	MinLength        *int64        `json:"minLength,omitempty"`
+	Pattern          string        `json:"pattern,omitempty"`
+	MaxItems         *int64        `json:"maxItems,omitempty"`
+	MinItems         *int64        `json:"minItems,omitempty"`
+	UniqueItems      bool          `json:"uniqueItems,omitempty"`
+	MultipleOf       *float64      `json:"multipleOf,omitempty"`
+	Enum             []interface{} `json:"enum,omitempty"`
+}
+
+// SetValidations defines all validations for a simple schema.
+//
+// NOTE: the input is the larger set of validations available for schemas.
+// For simple schemas, MinProperties and MaxProperties are ignored.
+func (v *CommonValidations) SetValidations(val SchemaValidations) {
+	v.Maximum = val.Maximum
+	v.ExclusiveMaximum = val.ExclusiveMaximum
+	v.Minimum = val.Minimum
+	v.ExclusiveMinimum = val.ExclusiveMinimum
+	v.MaxLength = val.MaxLength
+	v.MinLength = val.MinLength
+	v.Pattern = val.Pattern
+	v.MaxItems = val.MaxItems
+	v.MinItems = val.MinItems
+	v.UniqueItems = val.UniqueItems
+	v.MultipleOf = val.MultipleOf
+	v.Enum = val.Enum
+}
+
+type clearedValidation struct {
+	Validation string
+	Value      interface{}
+}
+
+type clearedValidations []clearedValidation
+
+func (c clearedValidations) apply(cbs []func(string, interface{})) {
+	for _, cb := range cbs {
+		for _, cleared := range c {
+			cb(cleared.Validation, cleared.Value)
+		}
+	}
+}
+
+// ClearNumberValidations clears all number validations.
+//
+// Some callbacks may be set by the caller to capture changed values.
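+//
+// For example (an illustrative sketch; the logging callback is an assumption, not upstream code):
+//
+//	v.ClearNumberValidations(func(name string, value interface{}) {
+//		log.Printf("cleared %s (was %v)", name, value)
+//	})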
+func (v *CommonValidations) ClearNumberValidations(cbs ...func(string, interface{})) { + done := make(clearedValidations, 0, 5) + defer func() { + done.apply(cbs) + }() + + if v.Minimum != nil { + done = append(done, clearedValidation{Validation: "minimum", Value: v.Minimum}) + v.Minimum = nil + } + if v.Maximum != nil { + done = append(done, clearedValidation{Validation: "maximum", Value: v.Maximum}) + v.Maximum = nil + } + if v.ExclusiveMaximum { + done = append(done, clearedValidation{Validation: "exclusiveMaximum", Value: v.ExclusiveMaximum}) + v.ExclusiveMaximum = false + } + if v.ExclusiveMinimum { + done = append(done, clearedValidation{Validation: "exclusiveMinimum", Value: v.ExclusiveMinimum}) + v.ExclusiveMinimum = false + } + if v.MultipleOf != nil { + done = append(done, clearedValidation{Validation: "multipleOf", Value: v.MultipleOf}) + v.MultipleOf = nil + } +} + +// ClearStringValidations clears all string validations. +// +// Some callbacks may be set by the caller to capture changed values. +func (v *CommonValidations) ClearStringValidations(cbs ...func(string, interface{})) { + done := make(clearedValidations, 0, 3) + defer func() { + done.apply(cbs) + }() + + if v.Pattern != "" { + done = append(done, clearedValidation{Validation: "pattern", Value: v.Pattern}) + v.Pattern = "" + } + if v.MinLength != nil { + done = append(done, clearedValidation{Validation: "minLength", Value: v.MinLength}) + v.MinLength = nil + } + if v.MaxLength != nil { + done = append(done, clearedValidation{Validation: "maxLength", Value: v.MaxLength}) + v.MaxLength = nil + } +} + +// ClearArrayValidations clears all array validations. +// +// Some callbacks may be set by the caller to capture changed values. +func (v *CommonValidations) ClearArrayValidations(cbs ...func(string, interface{})) { + done := make(clearedValidations, 0, 3) + defer func() { + done.apply(cbs) + }() + + if v.MaxItems != nil { + done = append(done, clearedValidation{Validation: "maxItems", Value: v.MaxItems}) + v.MaxItems = nil + } + if v.MinItems != nil { + done = append(done, clearedValidation{Validation: "minItems", Value: v.MinItems}) + v.MinItems = nil + } + if v.UniqueItems { + done = append(done, clearedValidation{Validation: "uniqueItems", Value: v.UniqueItems}) + v.UniqueItems = false + } +} + +// Validations returns a clone of the validations for a simple schema. +// +// NOTE: in the context of simple schema objects, MinProperties, MaxProperties +// and PatternProperties remain unset. 
+func (v CommonValidations) Validations() SchemaValidations {
+	return SchemaValidations{
+		CommonValidations: v,
+	}
+}
+
+// HasNumberValidations indicates if the validations are for numbers or integers
+func (v CommonValidations) HasNumberValidations() bool {
+	return v.Maximum != nil || v.Minimum != nil || v.MultipleOf != nil
+}
+
+// HasStringValidations indicates if the validations are for strings
+func (v CommonValidations) HasStringValidations() bool {
+	return v.MaxLength != nil || v.MinLength != nil || v.Pattern != ""
+}
+
+// HasArrayValidations indicates if the validations are for arrays
+func (v CommonValidations) HasArrayValidations() bool {
+	return v.MaxItems != nil || v.MinItems != nil || v.UniqueItems
+}
+
+// HasEnum indicates if the validation includes some enum constraint
+func (v CommonValidations) HasEnum() bool {
+	return len(v.Enum) > 0
+}
+
+// SchemaValidations describes the validation properties of a schema
+//
+// NOTE: at this moment, this is not embedded in SchemaProps because this would induce a breaking change
+// in the exported members: all initializers using literals would fail.
+type SchemaValidations struct {
+	CommonValidations
+
+	PatternProperties SchemaProperties `json:"patternProperties,omitempty"`
+	MaxProperties     *int64           `json:"maxProperties,omitempty"`
+	MinProperties     *int64           `json:"minProperties,omitempty"`
+}
+
+// HasObjectValidations indicates if the validations are for objects
+func (v SchemaValidations) HasObjectValidations() bool {
+	return v.MaxProperties != nil || v.MinProperties != nil || v.PatternProperties != nil
+}
+
+// SetValidations for schema validations
+func (v *SchemaValidations) SetValidations(val SchemaValidations) {
+	v.CommonValidations.SetValidations(val)
+	v.PatternProperties = val.PatternProperties
+	v.MaxProperties = val.MaxProperties
+	v.MinProperties = val.MinProperties
+}
+
+// Validations returns a clone of the validations for a schema
+func (v SchemaValidations) Validations() SchemaValidations {
+	val := v.CommonValidations.Validations()
+	val.PatternProperties = v.PatternProperties
+	val.MinProperties = v.MinProperties
+	val.MaxProperties = v.MaxProperties
+	return val
+}
+
+// ClearObjectValidations clears all object validations.
+//
+// Some callbacks may be set by the caller to capture changed values.
+func (v *SchemaValidations) ClearObjectValidations(cbs ...func(string, interface{})) {
+	done := make(clearedValidations, 0, 3)
+	defer func() {
+		done.apply(cbs)
+	}()
+
+	if v.MaxProperties != nil {
+		done = append(done, clearedValidation{Validation: "maxProperties", Value: v.MaxProperties})
+		v.MaxProperties = nil
+	}
+	if v.MinProperties != nil {
+		done = append(done, clearedValidation{Validation: "minProperties", Value: v.MinProperties})
+		v.MinProperties = nil
+	}
+	if v.PatternProperties != nil {
+		done = append(done, clearedValidation{Validation: "patternProperties", Value: v.PatternProperties})
+		v.PatternProperties = nil
+	}
+}
diff --git a/vendor/github.com/go-openapi/spec/xml_object.go b/vendor/github.com/go-openapi/spec/xml_object.go
new file mode 100644
index 0000000000..945a46703d
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/xml_object.go
@@ -0,0 +1,68 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +// XMLObject a metadata object that allows for more fine-tuned XML model definitions. +// +// For more information: http://goo.gl/8us55a#xmlObject +type XMLObject struct { + Name string `json:"name,omitempty"` + Namespace string `json:"namespace,omitempty"` + Prefix string `json:"prefix,omitempty"` + Attribute bool `json:"attribute,omitempty"` + Wrapped bool `json:"wrapped,omitempty"` +} + +// WithName sets the xml name for the object +func (x *XMLObject) WithName(name string) *XMLObject { + x.Name = name + return x +} + +// WithNamespace sets the xml namespace for the object +func (x *XMLObject) WithNamespace(namespace string) *XMLObject { + x.Namespace = namespace + return x +} + +// WithPrefix sets the xml prefix for the object +func (x *XMLObject) WithPrefix(prefix string) *XMLObject { + x.Prefix = prefix + return x +} + +// AsAttribute flags this object as xml attribute +func (x *XMLObject) AsAttribute() *XMLObject { + x.Attribute = true + return x +} + +// AsElement flags this object as an xml node +func (x *XMLObject) AsElement() *XMLObject { + x.Attribute = false + return x +} + +// AsWrapped flags this object as wrapped, this is mostly useful for array types +func (x *XMLObject) AsWrapped() *XMLObject { + x.Wrapped = true + return x +} + +// AsUnwrapped flags this object as an xml node +func (x *XMLObject) AsUnwrapped() *XMLObject { + x.Wrapped = false + return x +} diff --git a/vendor/github.com/go-openapi/strfmt/.editorconfig b/vendor/github.com/go-openapi/strfmt/.editorconfig new file mode 100644 index 0000000000..3152da69a5 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/strfmt/.gitattributes b/vendor/github.com/go-openapi/strfmt/.gitattributes new file mode 100644 index 0000000000..d020be8ea4 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/.gitattributes @@ -0,0 +1,2 @@ +*.go text eol=lf + diff --git a/vendor/github.com/go-openapi/strfmt/.gitignore b/vendor/github.com/go-openapi/strfmt/.gitignore new file mode 100644 index 0000000000..dd91ed6a04 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/.gitignore @@ -0,0 +1,2 @@ +secrets.yml +coverage.out diff --git a/vendor/github.com/go-openapi/strfmt/.golangci.yml b/vendor/github.com/go-openapi/strfmt/.golangci.yml new file mode 100644 index 0000000000..22f8d21cca --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/.golangci.yml @@ -0,0 +1,61 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + 
min-complexity: 45 + maligned: + suggest-new: true + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + +linters: + enable-all: true + disable: + - maligned + - unparam + - lll + - gochecknoinits + - gochecknoglobals + - funlen + - godox + - gocognit + - whitespace + - wsl + - wrapcheck + - testpackage + - nlreturn + - gomnd + - exhaustivestruct + - goerr113 + - errorlint + - nestif + - godot + - gofumpt + - paralleltest + - tparallel + - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..9322b065e3 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. 
All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/strfmt/LICENSE b/vendor/github.com/go-openapi/strfmt/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/go-openapi/strfmt/README.md b/vendor/github.com/go-openapi/strfmt/README.md
new file mode 100644
index 0000000000..f6b39c6c56
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/README.md
@@ -0,0 +1,87 @@
+# Strfmt [![Build Status](https://github.com/go-openapi/strfmt/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/strfmt/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/strfmt)
+[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
+[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/strfmt/master/LICENSE)
+[![GoDoc](https://godoc.org/github.com/go-openapi/strfmt?status.svg)](http://godoc.org/github.com/go-openapi/strfmt)
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/strfmt)](https://goreportcard.com/report/github.com/go-openapi/strfmt)
+
+This package exposes a registry of data types to support string formats in the go-openapi toolkit.
+
+strfmt represents a well known string format such as credit card or email. The go toolkit for OpenAPI specifications knows how to deal with those.
+
+## Supported data formats
+go-openapi/strfmt follows the swagger 2.0 specification with the following formats
+defined [here](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types).
+
+It also provides convenient extensions to go-openapi users.
+
+- [x] JSON-schema draft 4 formats
+  - date-time
+  - email
+  - hostname
+  - ipv4
+  - ipv6
+  - uri
+- [x] swagger 2.0 format extensions
+  - binary
+  - byte (e.g. base64 encoded string)
+  - date (e.g. "1970-01-01")
+  - password
+- [x] go-openapi custom format extensions
+  - bsonobjectid (BSON objectID)
+  - creditcard
+  - duration (e.g. "3 weeks", "1ms")
+  - hexcolor (e.g. "#FFFFFF")
+  - isbn, isbn10, isbn13
+  - mac (e.g. "01:02:03:04:05:06")
+  - rgbcolor (e.g. "rgb(100,100,100)")
+  - ssn
+  - uuid, uuid3, uuid4, uuid5
+  - cidr (e.g. "192.0.2.1/24", "2001:db8:a0b:12f0::1/32")
+  - ulid (e.g. "00000PP9HGSBSSDZ1JTEXBJ0PW", [spec](https://github.com/ulid/spec))
+
+> NOTE: as the name suggests, this package is intended to support string formatting only.
+> It does not provide validation for numerical values with the swagger format extensions for JSON types "number" or
+> "integer" (e.g. float, double, int32...).
+
+## Type conversion
+
+All types defined here are stringers and may be converted to strings with `.String()`.
+Note that most types defined by this package may be converted directly to string like `string(Email{})`.
+
+`Date` and `DateTime` may be converted directly to `time.Time` like `time.Time(DateTime{})`.
+Similarly, you can convert `Duration` to `time.Duration` as in `time.Duration(Duration{})`.
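+
+A minimal sketch of these conversions (illustrative only; the concrete values are assumptions, not part of this README):
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/go-openapi/strfmt"
+)
+
+func main() {
+	d := strfmt.Date(time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC))
+	fmt.Println(d.String())                 // "1970-01-01"
+	_ = time.Time(strfmt.DateTime{})        // DateTime converts back to time.Time
+	_ = string(strfmt.Email("a@example.com")) // most types are thin string aliases
+}
+```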
+## Using pointers
+
+The `conv` subpackage provides helpers to convert the types to and from pointers, just like `go-openapi/swag` does
+with primitive types.
+
+## Format types
+Types defined in strfmt expose marshaling and validation capabilities.
+
+List of defined types:
+- Base64
+- CreditCard
+- Date
+- DateTime
+- Duration
+- Email
+- HexColor
+- Hostname
+- IPv4
+- IPv6
+- CIDR
+- ISBN
+- ISBN10
+- ISBN13
+- MAC
+- ObjectId
+- Password
+- RGBColor
+- SSN
+- URI
+- UUID
+- UUID3
+- UUID4
+- UUID5
+- [ULID](https://github.com/ulid/spec)
diff --git a/vendor/github.com/go-openapi/strfmt/bson.go b/vendor/github.com/go-openapi/strfmt/bson.go
new file mode 100644
index 0000000000..cfa9a526fe
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/bson.go
@@ -0,0 +1,165 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package strfmt
+
+import (
+	"database/sql/driver"
+	"fmt"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	bsonprim "go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+func init() {
+	var id ObjectId
+	// register this format in the default registry
+	Default.Add("bsonobjectid", &id, IsBSONObjectID)
+}
+
+// IsBSONObjectID returns true when the string is a valid BSON.ObjectId
+func IsBSONObjectID(str string) bool {
+	_, err := bsonprim.ObjectIDFromHex(str)
+	return err == nil
+}
+
+// ObjectId represents a BSON object ID (alias to go.mongodb.org/mongo-driver/bson/primitive.ObjectID)
+//
+// swagger:strfmt bsonobjectid
+type ObjectId bsonprim.ObjectID //nolint:revive,stylecheck
+
+// NewObjectId creates an ObjectId from a hex string
+func NewObjectId(hex string) ObjectId { //nolint:revive,stylecheck
+	oid, err := bsonprim.ObjectIDFromHex(hex)
+	if err != nil {
+		panic(err)
+	}
+	return ObjectId(oid)
+}
+
+// MarshalText turns this instance into text
+func (id ObjectId) MarshalText() ([]byte, error) {
+	oid := bsonprim.ObjectID(id)
+	if oid == bsonprim.NilObjectID {
+		return nil, nil
+	}
+	return []byte(oid.Hex()), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (id *ObjectId) UnmarshalText(data []byte) error { // validation is performed later on
+	if len(data) == 0 {
+		*id = ObjectId(bsonprim.NilObjectID)
+		return nil
+	}
+	oidstr := string(data)
+	oid, err := bsonprim.ObjectIDFromHex(oidstr)
+	if err != nil {
+		return err
+	}
+	*id = ObjectId(oid)
+	return nil
+}
+
+// Scan reads a value from a database driver
+func (id *ObjectId) Scan(raw interface{}) error {
+	var data []byte
+	switch v := raw.(type) {
+	case []byte:
+		data = v
+	case string:
+		data = []byte(v)
+	default:
+		return fmt.Errorf("cannot sql.Scan() strfmt.ObjectId from: %#v", v)
+	}
+
+	return id.UnmarshalText(data)
+}
+
+// Value converts a value to a database driver value
+func (id ObjectId) Value() (driver.Value, error) {
+	return driver.Value(bsonprim.ObjectID(id).Hex()), nil
+}
+
+func (id ObjectId) String() string {
+	return bsonprim.ObjectID(id).Hex()
+}
+
+// MarshalJSON returns the ObjectId as JSON
+func (id ObjectId) MarshalJSON() ([]byte, error) {
+	return bsonprim.ObjectID(id).MarshalJSON()
+}
+
+// UnmarshalJSON sets the ObjectId from JSON
+func (id *ObjectId) UnmarshalJSON(data []byte) error {
+	var obj bsonprim.ObjectID
+	if err := obj.UnmarshalJSON(data); err != nil {
+		return err
+	}
+	*id = ObjectId(obj)
+	return nil
+}
+
+// MarshalBSON renders the object id as a BSON document
+func (id ObjectId) MarshalBSON() ([]byte, error) {
+	return bson.Marshal(bson.M{"data": bsonprim.ObjectID(id)})
+}
+
+// UnmarshalBSON reads the objectId from a BSON document
+func (id *ObjectId) UnmarshalBSON(data []byte) error {
+	var obj struct {
+		Data bsonprim.ObjectID
+	}
+	if err := bson.Unmarshal(data, &obj); err != nil {
+		return err
+	}
+	*id = ObjectId(obj.Data)
+	return nil
+}
+
+// MarshalBSONValue is an interface implemented by types that can marshal themselves
+// into a BSON document represented as bytes. The bytes returned must be a valid
+// BSON document if the error is nil.
+func (id ObjectId) MarshalBSONValue() (bsontype.Type, []byte, error) {
+	oid := bsonprim.ObjectID(id)
+	return bson.TypeObjectID, oid[:], nil
+}
+
+// UnmarshalBSONValue is an interface implemented by types that can unmarshal a
+// BSON value representation of themselves. The BSON bytes and type can be
+// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
+// wishes to retain the data after returning.
+func (id *ObjectId) UnmarshalBSONValue(_ bsontype.Type, data []byte) error {
+	var oid bsonprim.ObjectID
+	copy(oid[:], data)
+	*id = ObjectId(oid)
+	return nil
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (id *ObjectId) DeepCopyInto(out *ObjectId) {
+	*out = *id
+}
+
+// DeepCopy copies the receiver into a new ObjectId.
+func (id *ObjectId) DeepCopy() *ObjectId {
+	if id == nil {
+		return nil
+	}
+	out := new(ObjectId)
+	id.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/github.com/go-openapi/strfmt/date.go b/vendor/github.com/go-openapi/strfmt/date.go
new file mode 100644
index 0000000000..3c93381c7c
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/date.go
@@ -0,0 +1,187 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package strfmt
+
+import (
+	"database/sql/driver"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+)
+
+func init() {
+	d := Date{}
+	// register this format in the default registry
+	Default.Add("date", &d, IsDate)
+}
+
+// IsDate returns true when the string is a valid date
+func IsDate(str string) bool {
+	_, err := time.Parse(RFC3339FullDate, str)
+	return err == nil
+}
+
+const (
+	// RFC3339FullDate represents a full-date as specified by RFC3339
+	// See: http://goo.gl/xXOvVd
+	RFC3339FullDate = "2006-01-02"
+)
+
+// Date represents a date from the API
+//
+// swagger:strfmt date
+type Date time.Time
+
+// String converts this date into a string
+func (d Date) String() string {
+	return time.Time(d).Format(RFC3339FullDate)
+}
+
+// UnmarshalText parses a text representation into a date type
+func (d *Date) UnmarshalText(text []byte) error {
+	if len(text) == 0 {
+		return nil
+	}
+	dd, err := time.ParseInLocation(RFC3339FullDate, string(text), DefaultTimeLocation)
+	if err != nil {
+		return err
+	}
+	*d = Date(dd)
+	return nil
+}
+
+// MarshalText serializes this date type to string
+func (d Date) MarshalText() ([]byte, error) {
+	return []byte(d.String()), nil
+}
+
+// Scan scans a Date value from database driver type.
+func (d *Date) Scan(raw interface{}) error {
+	switch v := raw.(type) {
+	case []byte:
+		return d.UnmarshalText(v)
+	case string:
+		return d.UnmarshalText([]byte(v))
+	case time.Time:
+		*d = Date(v)
+		return nil
+	case nil:
+		*d = Date{}
+		return nil
+	default:
+		return fmt.Errorf("cannot sql.Scan() strfmt.Date from: %#v", v)
+	}
+}
+
+// Value converts Date to a primitive value ready to be written to a database.
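+//
+// For example (illustrative), Date(time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC)).Value()
+// yields the driver value "1970-01-01".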
+func (d Date) Value() (driver.Value, error) { + return driver.Value(d.String()), nil +} + +// MarshalJSON returns the Date as JSON +func (d Date) MarshalJSON() ([]byte, error) { + return json.Marshal(time.Time(d).Format(RFC3339FullDate)) +} + +// UnmarshalJSON sets the Date from JSON +func (d *Date) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var strdate string + if err := json.Unmarshal(data, &strdate); err != nil { + return err + } + tt, err := time.ParseInLocation(RFC3339FullDate, strdate, DefaultTimeLocation) + if err != nil { + return err + } + *d = Date(tt) + return nil +} + +func (d Date) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": d.String()}) +} + +func (d *Date) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if data, ok := m["data"].(string); ok { + rd, err := time.ParseInLocation(RFC3339FullDate, data, DefaultTimeLocation) + if err != nil { + return err + } + *d = Date(rd) + return nil + } + + return errors.New("couldn't unmarshal bson bytes value as Date") +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (d *Date) DeepCopyInto(out *Date) { + *out = *d +} + +// DeepCopy copies the receiver into a new Date. +func (d *Date) DeepCopy() *Date { + if d == nil { + return nil + } + out := new(Date) + d.DeepCopyInto(out) + return out +} + +// GobEncode implements the gob.GobEncoder interface. +func (d Date) GobEncode() ([]byte, error) { + return d.MarshalBinary() +} + +// GobDecode implements the gob.GobDecoder interface. +func (d *Date) GobDecode(data []byte) error { + return d.UnmarshalBinary(data) +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d Date) MarshalBinary() ([]byte, error) { + return time.Time(d).MarshalBinary() +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Date) UnmarshalBinary(data []byte) error { + var original time.Time + + err := original.UnmarshalBinary(data) + if err != nil { + return err + } + + *d = Date(original) + + return nil +} + +// Equal checks if two Date instances are equal +func (d Date) Equal(d2 Date) bool { + return time.Time(d).Equal(time.Time(d2)) +} diff --git a/vendor/github.com/go-openapi/strfmt/default.go b/vendor/github.com/go-openapi/strfmt/default.go new file mode 100644 index 0000000000..2813714060 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/default.go @@ -0,0 +1,2051 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package strfmt + +import ( + "database/sql/driver" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net/mail" + "regexp" + "strings" + + "github.com/asaskevich/govalidator" + "github.com/google/uuid" + "go.mongodb.org/mongo-driver/bson" +) + +const ( + // HostnamePattern http://json-schema.org/latest/json-schema-validation.html#anchor114 + // A string instance is valid against this attribute if it is a valid + // representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. + // http://tools.ietf.org/html/rfc1034#section-3.5 + // ::= any one of the ten digits 0 through 9 + // var digit = /[0-9]/; + // ::= any one of the 52 alphabetic characters A through Z in upper case and a through z in lower case + // var letter = /[a-zA-Z]/; + // ::= | + // var letDig = /[0-9a-zA-Z]/; + // ::= | "-" + // var letDigHyp = /[-0-9a-zA-Z]/; + // ::= | + // var ldhStr = /[-0-9a-zA-Z]+/; + //