From e9dc7d29c1bd2f52b396281aeba5d0a93a2f45ea Mon Sep 17 00:00:00 2001
From: Chris Suszynski
Date: Wed, 23 Feb 2022 15:43:18 +0100
Subject: [PATCH] Embed kn-event for release-1.0 (at af13ecf492aa) (#976)

* Allow overriding of build.sh for downstream projects (knative/client#1544)

* Adding override script for kn-plugin-event (#910)

* Embed event plugin (#905)
---
 go.mod | 16 +- go.sum | 219 +- hack/build-flags.sh | 22 +- hack/build.sh | 25 +- hack/build.sh.d/README.md | 4 + hack/build.sh.d/kn-plugin-event.sh | 12 + pkg/kn/root/plugin_register.go | 1 + .../Azure/go-autorest/autorest/LICENSE | 191 + .../Azure/go-autorest/autorest/adal/LICENSE | 191 + .../Azure/go-autorest/autorest/date/LICENSE | 191 + .../Azure/go-autorest/logger/LICENSE | 191 + .../Azure/go-autorest/tracing/LICENSE | 191 + .../github.com/form3tech-oss/jwt-go/LICENSE | 8 + .../github.com/ghodss/yaml/LICENSE | 50 + .../github.com/klauspost/compress/LICENSE | 276 ++ .../compress/internal/snapref/LICENSE | 27 + .../github.com/thediveo/enumflag/LICENSE | 176 + .../github.com/wavesoftware/go-ensure/LICENSE | 201 ++ .../knative.dev/kn-plugin-event/LICENSE | 202 ++ .../go/compute/metadata/metadata.go | 2 +- .../github.com/Azure/go-autorest/.gitignore | 32 + .../github.com/Azure/go-autorest/CHANGELOG.md | 1004 ++++++ .../github.com/Azure/go-autorest/GNUmakefile | 23 + .../github.com/Azure/go-autorest/Gopkg.lock | 324 ++ .../github.com/Azure/go-autorest/Gopkg.toml | 59 + vendor/github.com/Azure/go-autorest/LICENSE | 191 + vendor/github.com/Azure/go-autorest/README.md | 165 + .../Azure/go-autorest/autorest/LICENSE | 191 + .../Azure/go-autorest/autorest/adal/LICENSE | 191 + .../Azure/go-autorest/autorest/adal/README.md | 292 ++ .../Azure/go-autorest/autorest/adal/config.go | 151 + .../go-autorest/autorest/adal/devicetoken.go | 273 ++ .../Azure/go-autorest/autorest/adal/go.mod | 12 + .../Azure/go-autorest/autorest/adal/go.sum | 19 + .../autorest/adal/go_mod_tidy_hack.go | 24 + .../go-autorest/autorest/adal/persist.go | 135 + .../Azure/go-autorest/autorest/adal/sender.go | 95 + .../Azure/go-autorest/autorest/adal/token.go | 1266 +++++++ .../go-autorest/autorest/adal/token_1.13.go | 77 + .../go-autorest/autorest/adal/token_legacy.go | 76 + .../go-autorest/autorest/adal/version.go | 45 + .../go-autorest/autorest/authorization.go | 353 ++ .../go-autorest/autorest/authorization_sas.go | 66 + .../autorest/authorization_storage.go | 307 ++ .../Azure/go-autorest/autorest/autorest.go | 150 + .../Azure/go-autorest/autorest/azure/async.go | 991 ++++++ .../Azure/go-autorest/autorest/azure/azure.go | 388 ++ .../autorest/azure/environments.go | 269 ++ .../autorest/azure/metadata_environment.go | 245 ++ .../Azure/go-autorest/autorest/azure/rp.go | 204 ++ .../Azure/go-autorest/autorest/client.go | 324 ++ .../Azure/go-autorest/autorest/date/LICENSE | 191 + .../Azure/go-autorest/autorest/date/date.go | 96 + .../Azure/go-autorest/autorest/date/go.mod | 5 + .../Azure/go-autorest/autorest/date/go.sum | 2 + .../autorest/date/go_mod_tidy_hack.go | 24 + .../Azure/go-autorest/autorest/date/time.go | 103 + .../go-autorest/autorest/date/timerfc1123.go | 100 + .../go-autorest/autorest/date/unixtime.go | 123 + .../go-autorest/autorest/date/utility.go | 25 + .../Azure/go-autorest/autorest/error.go | 103 + .../Azure/go-autorest/autorest/go.mod | 12 + .../Azure/go-autorest/autorest/go.sum | 23 + .../go-autorest/autorest/go_mod_tidy_hack.go | 24 + .../Azure/go-autorest/autorest/preparer.go | 547 +++ .../Azure/go-autorest/autorest/responder.go | 269 ++ 
.../go-autorest/autorest/retriablerequest.go | 52 + .../autorest/retriablerequest_1.7.go | 54 + .../autorest/retriablerequest_1.8.go | 66 + .../Azure/go-autorest/autorest/sender.go | 447 +++ .../Azure/go-autorest/autorest/utility.go | 232 ++ .../go-autorest/autorest/utility_1.13.go | 29 + .../go-autorest/autorest/utility_legacy.go | 31 + .../Azure/go-autorest/autorest/version.go | 41 + .../Azure/go-autorest/azure-pipelines.yml | 105 + vendor/github.com/Azure/go-autorest/doc.go | 18 + .../Azure/go-autorest/logger/LICENSE | 191 + .../Azure/go-autorest/logger/go.mod | 5 + .../Azure/go-autorest/logger/go.sum | 2 + .../go-autorest/logger/go_mod_tidy_hack.go | 24 + .../Azure/go-autorest/logger/logger.go | 328 ++ .../Azure/go-autorest/tracing/LICENSE | 191 + .../Azure/go-autorest/tracing/go.mod | 5 + .../Azure/go-autorest/tracing/go.sum | 2 + .../go-autorest/tracing/go_mod_tidy_hack.go | 24 + .../Azure/go-autorest/tracing/tracing.go | 67 + .../github.com/Shopify/sarama/.golangci.yml | 4 +- vendor/github.com/Shopify/sarama/CHANGELOG.md | 4 + vendor/github.com/Shopify/sarama/Makefile | 2 +- vendor/github.com/Shopify/sarama/README.md | 7 +- vendor/github.com/Shopify/sarama/admin.go | 103 + .../sarama/alter_client_quotas_request.go | 194 + .../sarama/alter_client_quotas_response.go | 145 + .../Shopify/sarama/alter_configs_response.go | 4 +- .../Shopify/sarama/api_versions_request.go | 61 +- .../Shopify/sarama/api_versions_response.go | 137 +- .../Shopify/sarama/async_producer.go | 9 +- vendor/github.com/Shopify/sarama/broker.go | 149 +- vendor/github.com/Shopify/sarama/client.go | 28 +- vendor/github.com/Shopify/sarama/config.go | 16 +- vendor/github.com/Shopify/sarama/consumer.go | 4 +- .../Shopify/sarama/consumer_group.go | 60 +- .../github.com/Shopify/sarama/decompress.go | 6 +- .../Shopify/sarama/delete_offsets_request.go | 92 + .../Shopify/sarama/delete_offsets_response.go | 112 + .../sarama/describe_client_quotas_request.go | 141 + .../sarama/describe_client_quotas_response.go | 235 ++ .../sarama/describe_configs_response.go | 6 +- vendor/github.com/Shopify/sarama/dev.yml | 2 +- .../Shopify/sarama/docker-compose.yml | 18 +- vendor/github.com/Shopify/sarama/go.mod | 16 +- vendor/github.com/Shopify/sarama/go.sum | 51 +- .../github.com/Shopify/sarama/mockbroker.go | 10 +- .../Shopify/sarama/mockresponses.go | 59 + .../Shopify/sarama/packet_decoder.go | 1 + .../Shopify/sarama/packet_encoder.go | 1 + .../github.com/Shopify/sarama/partitioner.go | 6 +- .../github.com/Shopify/sarama/prep_encoder.go | 4 + .../github.com/Shopify/sarama/quota_types.go | 21 + .../github.com/Shopify/sarama/real_decoder.go | 14 +- .../github.com/Shopify/sarama/real_encoder.go | 6 + vendor/github.com/Shopify/sarama/request.go | 8 +- vendor/github.com/Shopify/sarama/sarama.go | 26 +- .../Shopify/sarama/sync_producer.go | 15 +- vendor/github.com/Shopify/sarama/utils.go | 30 +- vendor/github.com/Shopify/sarama/version.go | 20 + .../cpuguy83/go-md2man/v2/md2man/roff.go | 105 +- .../github.com/evanphx/json-patch/v5/patch.go | 46 +- .../form3tech-oss/jwt-go/.gitignore | 5 + .../form3tech-oss/jwt-go/.travis.yml | 12 + .../github.com/form3tech-oss/jwt-go/LICENSE | 8 + .../form3tech-oss/jwt-go/MIGRATION_GUIDE.md | 97 + .../github.com/form3tech-oss/jwt-go/README.md | 104 + .../form3tech-oss/jwt-go/VERSION_HISTORY.md | 118 + .../github.com/form3tech-oss/jwt-go/claims.go | 136 + vendor/github.com/form3tech-oss/jwt-go/doc.go | 4 + .../github.com/form3tech-oss/jwt-go/ecdsa.go | 148 + .../form3tech-oss/jwt-go/ecdsa_utils.go | 69 + 
.../github.com/form3tech-oss/jwt-go/errors.go | 59 + .../github.com/form3tech-oss/jwt-go/hmac.go | 95 + .../form3tech-oss/jwt-go/map_claims.go | 102 + .../github.com/form3tech-oss/jwt-go/none.go | 52 + .../github.com/form3tech-oss/jwt-go/parser.go | 148 + vendor/github.com/form3tech-oss/jwt-go/rsa.go | 101 + .../form3tech-oss/jwt-go/rsa_pss.go | 142 + .../form3tech-oss/jwt-go/rsa_utils.go | 101 + .../form3tech-oss/jwt-go/signing_method.go | 35 + .../github.com/form3tech-oss/jwt-go/token.go | 108 + vendor/github.com/fsnotify/fsnotify/.mailmap | 2 + .../github.com/fsnotify/fsnotify/.travis.yml | 36 - vendor/github.com/fsnotify/fsnotify/AUTHORS | 16 +- .../github.com/fsnotify/fsnotify/CHANGELOG.md | 116 +- vendor/github.com/fsnotify/fsnotify/README.md | 6 +- vendor/github.com/fsnotify/fsnotify/fen.go | 1 + .../github.com/fsnotify/fsnotify/fsnotify.go | 1 + vendor/github.com/fsnotify/fsnotify/go.mod | 4 +- vendor/github.com/fsnotify/fsnotify/go.sum | 4 +- .../github.com/fsnotify/fsnotify/inotify.go | 3 +- .../fsnotify/fsnotify/inotify_poller.go | 1 + vendor/github.com/fsnotify/fsnotify/kqueue.go | 1 + .../fsnotify/fsnotify/open_mode_bsd.go | 1 + .../fsnotify/fsnotify/open_mode_darwin.go | 1 + .../github.com/fsnotify/fsnotify/windows.go | 1 + vendor/github.com/ghodss/yaml/.gitignore | 20 + vendor/github.com/ghodss/yaml/.travis.yml | 7 + vendor/github.com/ghodss/yaml/LICENSE | 50 + vendor/github.com/ghodss/yaml/README.md | 121 + vendor/github.com/ghodss/yaml/fields.go | 501 +++ vendor/github.com/ghodss/yaml/yaml.go | 277 ++ vendor/github.com/go-logr/logr/.golangci.yaml | 29 + vendor/github.com/go-logr/logr/CHANGELOG.md | 6 + .../github.com/go-logr/logr/CONTRIBUTING.md | 17 + vendor/github.com/go-logr/logr/README.md | 209 +- vendor/github.com/go-logr/logr/discard.go | 35 +- vendor/github.com/go-logr/logr/go.mod | 2 +- vendor/github.com/go-logr/logr/logr.go | 533 ++- vendor/github.com/golang/snappy/AUTHORS | 1 + vendor/github.com/golang/snappy/CONTRIBUTORS | 2 + vendor/github.com/golang/snappy/decode.go | 83 +- .../github.com/golang/snappy/encode_arm64.s | 4 +- vendor/github.com/imdario/mergo/.travis.yml | 3 + vendor/github.com/imdario/mergo/README.md | 2 +- vendor/github.com/imdario/mergo/merge.go | 9 +- .../klauspost/compress/.gitattributes | 2 + .../github.com/klauspost/compress/.gitignore | 25 + .../klauspost/compress/.goreleaser.yml | 137 + vendor/github.com/klauspost/compress/LICENSE | 276 ++ .../github.com/klauspost/compress/README.md | 438 +++ .../klauspost/compress/compressible.go | 85 + vendor/github.com/klauspost/compress/gen.sh | 4 + vendor/github.com/klauspost/compress/go.mod | 3 + vendor/github.com/klauspost/compress/go.sum | 0 .../klauspost/compress/huff0/compress.go | 64 + .../klauspost/compress/huff0/decompress.go | 275 +- .../klauspost/compress/huff0/huff0.go | 62 + .../compress/internal/snapref/LICENSE | 27 + .../compress/internal/snapref/decode.go | 264 ++ .../compress/internal/snapref/decode_other.go | 113 + .../compress/internal/snapref/encode.go | 289 ++ .../compress/internal/snapref/encode_other.go | 236 ++ .../compress/internal/snapref/snappy.go | 98 + vendor/github.com/klauspost/compress/s2sx.mod | 4 + vendor/github.com/klauspost/compress/s2sx.sum | 0 .../klauspost/compress/zstd/README.md | 32 +- .../klauspost/compress/zstd/blockdec.go | 4 +- .../klauspost/compress/zstd/decoder.go | 5 +- .../compress/zstd/decoder_options.go | 25 +- .../klauspost/compress/zstd/enc_base.go | 4 +- .../klauspost/compress/zstd/enc_best.go | 161 +- .../klauspost/compress/zstd/enc_better.go | 64 
+- .../klauspost/compress/zstd/enc_dfast.go | 61 +- .../klauspost/compress/zstd/enc_fast.go | 37 +- .../klauspost/compress/zstd/encoder.go | 31 +- .../compress/zstd/encoder_options.go | 2 +- .../klauspost/compress/zstd/framedec.go | 32 +- .../klauspost/compress/zstd/hash.go | 60 +- .../compress/zstd/internal/xxhash/xxhash.go | 1 - .../zstd/internal/xxhash/xxhash_amd64.go | 7 +- .../zstd/internal/xxhash/xxhash_amd64.s | 66 +- .../zstd/internal/xxhash/xxhash_other.go | 1 + .../klauspost/compress/zstd/snappy.go | 2 +- .../github.com/klauspost/compress/zstd/zip.go | 9 +- .../mitchellh/mapstructure/CHANGELOG.md | 10 +- .../mitchellh/mapstructure/decode_hooks.go | 3 +- .../mitchellh/mapstructure/mapstructure.go | 13 +- .../openzipkin/zipkin-go/model/annotation.go | 2 +- .../openzipkin/zipkin-go/model/doc.go | 2 +- .../openzipkin/zipkin-go/model/endpoint.go | 2 +- .../openzipkin/zipkin-go/model/kind.go | 2 +- .../openzipkin/zipkin-go/model/span.go | 2 +- .../openzipkin/zipkin-go/model/span_id.go | 2 +- .../openzipkin/zipkin-go/model/traceid.go | 2 +- vendor/github.com/pierrec/lz4/writer.go | 21 +- .../github.com/pierrec/lz4/writer_legacy.go | 182 + vendor/github.com/spf13/cast/.travis.yml | 16 - vendor/github.com/spf13/cast/README.md | 2 +- vendor/github.com/spf13/cast/cast.go | 5 + vendor/github.com/spf13/cast/caste.go | 148 +- .../spf13/cast/timeformattype_string.go | 27 + vendor/github.com/spf13/viper/README.md | 91 +- vendor/github.com/spf13/viper/go.mod | 13 +- vendor/github.com/spf13/viper/go.sum | 200 +- .../spf13/viper/internal/encoding/decoder.go | 61 + .../spf13/viper/internal/encoding/encoder.go | 60 + .../spf13/viper/internal/encoding/error.go | 7 + .../viper/internal/encoding/hcl/codec.go | 40 + .../viper/internal/encoding/json/codec.go | 17 + .../viper/internal/encoding/toml/codec.go | 45 + .../viper/internal/encoding/yaml/codec.go | 14 + vendor/github.com/spf13/viper/util.go | 14 +- vendor/github.com/spf13/viper/viper.go | 131 +- vendor/github.com/thediveo/enumflag/LICENSE | 176 + vendor/github.com/thediveo/enumflag/README.md | 166 + vendor/github.com/thediveo/enumflag/doc.go | 16 + .../thediveo/enumflag/enumflag.code-workspace | 7 + .../github.com/thediveo/enumflag/enumflag.go | 196 ++ .../github.com/thediveo/enumflag/enumslice.go | 94 + vendor/github.com/thediveo/enumflag/go.mod | 9 + vendor/github.com/thediveo/enumflag/go.sum | 148 + .../wavesoftware/go-ensure/.editorconfig | 15 + .../wavesoftware/go-ensure/.gitignore | 17 + .../wavesoftware/go-ensure/.travis.yml | 15 + .../github.com/wavesoftware/go-ensure/LICENSE | 201 ++ .../wavesoftware/go-ensure/Makefile | 32 + .../wavesoftware/go-ensure/README.md | 55 + .../wavesoftware/go-ensure/errors.go | 33 + .../github.com/wavesoftware/go-ensure/go.mod | 5 + .../github.com/wavesoftware/go-ensure/go.sum | 2 + .../wavesoftware/go-ensure/revive.toml | 35 + .../golang.org/x/crypto/pkcs12/bmp-string.go | 50 + vendor/golang.org/x/crypto/pkcs12/crypto.go | 131 + vendor/golang.org/x/crypto/pkcs12/errors.go | 23 + .../x/crypto/pkcs12/internal/rc2/rc2.go | 271 ++ vendor/golang.org/x/crypto/pkcs12/mac.go | 45 + vendor/golang.org/x/crypto/pkcs12/pbkdf.go | 170 + vendor/golang.org/x/crypto/pkcs12/pkcs12.go | 360 ++ vendor/golang.org/x/crypto/pkcs12/safebags.go | 57 + .../x/mod/internal/lazyregexp/lazyre.go | 78 + vendor/golang.org/x/mod/module/module.go | 54 +- vendor/golang.org/x/mod/module/pseudo.go | 250 ++ vendor/golang.org/x/mod/semver/semver.go | 20 + vendor/golang.org/x/net/http2/transport.go | 18 +- 
vendor/golang.org/x/oauth2/google/google.go | 28 +- .../externalaccount/basecredentials.go | 10 +- .../internal/externalaccount/impersonate.go | 36 +- .../golang.org/x/sys/plan9/pwd_go15_plan9.go | 1 + vendor/golang.org/x/sys/plan9/pwd_plan9.go | 1 + vendor/golang.org/x/sys/plan9/race.go | 1 + vendor/golang.org/x/sys/plan9/race0.go | 1 + vendor/golang.org/x/sys/plan9/str.go | 1 + vendor/golang.org/x/sys/plan9/syscall.go | 1 + .../x/sys/plan9/zsyscall_plan9_386.go | 1 + .../x/sys/plan9/zsyscall_plan9_amd64.go | 1 + .../x/sys/plan9/zsyscall_plan9_arm.go | 1 + vendor/golang.org/x/sys/unix/README.md | 2 +- vendor/golang.org/x/sys/unix/mkall.sh | 2 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 10 +- .../golang.org/x/sys/unix/sockcmsg_linux.go | 49 + vendor/golang.org/x/sys/unix/syscall_aix.go | 22 +- vendor/golang.org/x/sys/unix/syscall_bsd.go | 24 +- .../golang.org/x/sys/unix/syscall_darwin.go | 61 +- vendor/golang.org/x/sys/unix/syscall_linux.go | 77 +- .../x/sys/unix/syscall_linux_386.go | 10 - .../x/sys/unix/syscall_linux_amd64.go | 10 - .../x/sys/unix/syscall_linux_arm.go | 10 - .../x/sys/unix/syscall_linux_arm64.go | 16 - .../x/sys/unix/syscall_linux_mips64x.go | 10 - .../x/sys/unix/syscall_linux_mipsx.go | 10 - .../x/sys/unix/syscall_linux_ppc.go | 10 - .../x/sys/unix/syscall_linux_ppc64x.go | 10 - .../x/sys/unix/syscall_linux_riscv64.go | 16 - .../x/sys/unix/syscall_linux_s390x.go | 10 - .../x/sys/unix/syscall_linux_sparc64.go | 10 - .../golang.org/x/sys/unix/syscall_solaris.go | 16 +- .../x/sys/unix/syscall_zos_s390x.go | 16 +- vendor/golang.org/x/sys/unix/sysvshm_linux.go | 21 + vendor/golang.org/x/sys/unix/sysvshm_unix.go | 61 + .../x/sys/unix/sysvshm_unix_other.go | 14 + .../x/sys/unix/zerrors_darwin_amd64.go | 3126 +++++++++-------- .../x/sys/unix/zerrors_darwin_arm64.go | 3126 +++++++++-------- vendor/golang.org/x/sys/unix/zerrors_linux.go | 42 +- .../x/sys/unix/zerrors_linux_386.go | 3 +- .../x/sys/unix/zerrors_linux_amd64.go | 3 +- .../x/sys/unix/zerrors_linux_arm.go | 3 +- .../x/sys/unix/zerrors_linux_arm64.go | 3 +- .../x/sys/unix/zerrors_linux_mips.go | 3 +- .../x/sys/unix/zerrors_linux_mips64.go | 3 +- .../x/sys/unix/zerrors_linux_mips64le.go | 3 +- .../x/sys/unix/zerrors_linux_mipsle.go | 3 +- .../x/sys/unix/zerrors_linux_ppc.go | 3 +- .../x/sys/unix/zerrors_linux_ppc64.go | 3 +- .../x/sys/unix/zerrors_linux_ppc64le.go | 3 +- .../x/sys/unix/zerrors_linux_riscv64.go | 3 +- .../x/sys/unix/zerrors_linux_s390x.go | 3 +- .../x/sys/unix/zerrors_linux_sparc64.go | 3 +- .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 22 +- .../x/sys/unix/zsyscall_aix_ppc64.go | 20 +- .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 20 +- .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 18 +- .../x/sys/unix/zsyscall_darwin_amd64.go | 59 + .../x/sys/unix/zsyscall_darwin_amd64.s | 24 + .../x/sys/unix/zsyscall_darwin_arm64.go | 59 + .../x/sys/unix/zsyscall_darwin_arm64.s | 24 + .../golang.org/x/sys/unix/zsyscall_linux.go | 60 +- .../x/sys/unix/zsyscall_linux_386.go | 21 - .../x/sys/unix/zsyscall_linux_amd64.go | 21 - .../x/sys/unix/zsyscall_linux_arm.go | 21 - .../x/sys/unix/zsyscall_linux_mips.go | 21 - .../x/sys/unix/zsyscall_linux_mips64.go | 21 - .../x/sys/unix/zsyscall_linux_mips64le.go | 21 - .../x/sys/unix/zsyscall_linux_mipsle.go | 21 - .../x/sys/unix/zsyscall_linux_ppc.go | 21 - .../x/sys/unix/zsyscall_linux_ppc64.go | 21 - .../x/sys/unix/zsyscall_linux_ppc64le.go | 21 - .../x/sys/unix/zsyscall_linux_s390x.go | 21 - .../x/sys/unix/zsyscall_linux_sparc64.go | 21 - .../x/sys/unix/zsysnum_linux_386.go | 1 + 
.../x/sys/unix/zsysnum_linux_amd64.go | 1 + .../x/sys/unix/zsysnum_linux_arm.go | 2 + .../x/sys/unix/zsysnum_linux_arm64.go | 1 + .../x/sys/unix/zsysnum_linux_mips.go | 1 + .../x/sys/unix/zsysnum_linux_mips64.go | 1 + .../x/sys/unix/zsysnum_linux_mips64le.go | 1 + .../x/sys/unix/zsysnum_linux_mipsle.go | 1 + .../x/sys/unix/zsysnum_linux_ppc.go | 1 + .../x/sys/unix/zsysnum_linux_ppc64.go | 1 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 1 + .../x/sys/unix/zsysnum_linux_riscv64.go | 1 + .../x/sys/unix/zsysnum_linux_s390x.go | 1 + .../x/sys/unix/zsysnum_linux_sparc64.go | 1 + .../x/sys/unix/ztypes_darwin_amd64.go | 133 +- .../x/sys/unix/ztypes_darwin_arm64.go | 133 +- vendor/golang.org/x/sys/unix/ztypes_linux.go | 38 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 31 +- .../x/sys/unix/ztypes_linux_amd64.go | 28 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 31 +- .../x/sys/unix/ztypes_linux_arm64.go | 28 +- .../x/sys/unix/ztypes_linux_mips.go | 30 +- .../x/sys/unix/ztypes_linux_mips64.go | 28 +- .../x/sys/unix/ztypes_linux_mips64le.go | 28 +- .../x/sys/unix/ztypes_linux_mipsle.go | 30 +- .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 32 +- .../x/sys/unix/ztypes_linux_ppc64.go | 27 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 27 +- .../x/sys/unix/ztypes_linux_riscv64.go | 28 +- .../x/sys/unix/ztypes_linux_s390x.go | 27 +- .../x/sys/unix/ztypes_linux_sparc64.go | 27 +- .../x/sys/unix/ztypes_openbsd_386.go | 11 +- .../x/sys/unix/ztypes_openbsd_amd64.go | 11 +- .../x/sys/unix/ztypes_openbsd_arm.go | 11 +- .../x/sys/unix/ztypes_openbsd_arm64.go | 11 +- .../x/sys/unix/ztypes_openbsd_mips64.go | 11 +- vendor/golang.org/x/sys/windows/aliases.go | 4 +- vendor/golang.org/x/sys/windows/eventlog.go | 1 + .../x/sys/windows/memory_windows.go | 11 + vendor/golang.org/x/sys/windows/mksyscall.go | 3 +- vendor/golang.org/x/sys/windows/race.go | 1 + vendor/golang.org/x/sys/windows/race0.go | 1 + vendor/golang.org/x/sys/windows/service.go | 14 +- .../x/sys/windows/setupapi_windows.go | 1425 ++++++++ .../x/sys/windows/setupapierrors_windows.go | 100 - vendor/golang.org/x/sys/windows/str.go | 1 + vendor/golang.org/x/sys/windows/syscall.go | 1 + .../x/sys/windows/syscall_windows.go | 37 +- .../golang.org/x/sys/windows/types_windows.go | 351 +- .../x/sys/windows/zsyscall_windows.go | 505 +++ .../x/tools/go/ast/astutil/enclosing.go | 20 +- .../x/tools/go/ast/astutil/rewrite.go | 12 +- .../x/tools/internal/typeparams/common.go | 7 + .../internal/typeparams/enabled_go117.go | 4 +- .../internal/typeparams/enabled_go118.go | 4 +- .../x/tools/internal/typeparams/normalize.go | 216 ++ .../x/tools/internal/typeparams/termlist.go | 172 + .../internal/typeparams/typeparams_go117.go | 113 +- .../internal/typeparams/typeparams_go118.go | 87 +- .../x/tools/internal/typeparams/typeterm.go | 170 + .../googleapis/api/httpbody/httpbody.pb.go | 10 +- .../grpc/attributes/attributes.go | 78 +- .../grpc/balancer/base/balancer.go | 65 +- .../grpc/balancer/grpclb/state/state.go | 2 +- vendor/google.golang.org/grpc/clientconn.go | 181 +- .../grpc/credentials/credentials.go | 15 +- vendor/google.golang.org/grpc/dialoptions.go | 27 +- vendor/google.golang.org/grpc/go.mod | 3 +- vendor/google.golang.org/grpc/go.sum | 7 +- .../grpc/grpclog/loggerv2.go | 86 +- .../grpc/internal/envconfig/envconfig.go | 5 +- .../grpc/internal/grpcutil/grpcutil.go | 20 + .../grpc/internal/grpcutil/regex.go | 28 + .../grpc/internal/grpcutil/target.go | 89 - .../grpc/internal/metadata/metadata.go | 30 +- .../grpc/internal/resolver/config_selector.go | 2 +- 
.../grpc/internal/resolver/unix/unix.go | 12 +- .../grpc/internal/transport/controlbuf.go | 8 +- .../grpc/internal/transport/http2_client.go | 61 +- .../grpc/internal/transport/http2_server.go | 51 +- .../transport/networktype/networktype.go | 2 +- .../grpc/internal/xds/env/env.go | 8 +- .../grpc/internal/xds_handshake_cluster.go | 2 +- .../google.golang.org/grpc/picker_wrapper.go | 2 +- vendor/google.golang.org/grpc/resolver/map.go | 109 + .../grpc/resolver/resolver.go | 56 +- vendor/google.golang.org/grpc/server.go | 48 +- .../google.golang.org/grpc/status/status.go | 15 +- vendor/google.golang.org/grpc/version.go | 2 +- vendor/gopkg.in/ini.v1/.golangci.yml | 21 + vendor/gopkg.in/ini.v1/codecov.yml | 2 +- vendor/gopkg.in/ini.v1/ini.go | 4 +- vendor/gopkg.in/ini.v1/key.go | 17 +- vendor/gopkg.in/ini.v1/parser.go | 12 +- vendor/gopkg.in/ini.v1/section.go | 2 +- .../plugin/pkg/client/auth/azure/README.md | 56 + .../plugin/pkg/client/auth/azure/azure.go | 469 +++ .../client/auth/openstack/openstack_stub.go | 36 + .../plugin/pkg/client/auth/plugins.go | 22 + .../pkg/client/auth/plugins_providers.go | 26 + vendor/k8s.io/klog/v2/go.mod | 2 +- vendor/k8s.io/klog/v2/go.sum | 4 +- vendor/k8s.io/klog/v2/klog.go | 257 +- vendor/k8s.io/klog/v2/klog_file.go | 34 - vendor/k8s.io/klog/v2/klog_file_others.go | 19 + vendor/k8s.io/klog/v2/klog_file_windows.go | 34 + vendor/knative.dev/hack/library.sh | 95 +- vendor/knative.dev/hack/release.sh | 24 +- vendor/knative.dev/kn-plugin-event/LICENSE | 202 ++ .../kn-plugin-event/internal/cli/cmd/build.go | 51 + .../internal/cli/cmd/builder.go | 36 + .../kn-plugin-event/internal/cli/cmd/root.go | 124 + .../kn-plugin-event/internal/cli/cmd/send.go | 95 + .../internal/cli/cmd/testing.go | 46 + .../kn-plugin-event/internal/cli/cmd/types.go | 7 + .../internal/cli/cmd/version.go | 64 + .../kn-plugin-event/pkg/cli/create.go | 169 + .../kn-plugin-event/pkg/cli/ics/encoding.go | 58 + .../kn-plugin-event/pkg/cli/ics/send.go | 59 + .../kn-plugin-event/pkg/cli/ics/types.go | 33 + .../kn-plugin-event/pkg/cli/options.go | 114 + .../pkg/cli/retcode/retcode.go | 11 + .../kn-plugin-event/pkg/cli/send.go | 37 + .../kn-plugin-event/pkg/cli/target.go | 114 + .../kn-plugin-event/pkg/cli/types.go | 56 + .../kn-plugin-event/pkg/cli/version.go | 9 + .../kn-plugin-event/pkg/configuration/cli.go | 11 + .../pkg/configuration/defaults.go | 22 + .../kn-plugin-event/pkg/configuration/ics.go | 11 + .../pkg/configuration/memoized.go | 29 + .../kn-plugin-event/pkg/event/constants.go | 27 + .../kn-plugin-event/pkg/event/create.go | 81 + .../kn-plugin-event/pkg/event/sender.go | 41 + .../kn-plugin-event/pkg/event/types.go | 92 + .../pkg/k8s/addressresolver.go | 119 + .../kn-plugin-event/pkg/k8s/errors.go | 23 + .../kn-plugin-event/pkg/k8s/jobrunner.go | 143 + .../kn-plugin-event/pkg/k8s/kubeclient.go | 141 + .../kn-plugin-event/pkg/metadata/image.go | 32 + .../kn-plugin-event/pkg/metadata/names.go | 13 + .../kn-plugin-event/pkg/metadata/reflect.go | 18 + .../kn-plugin-event/pkg/metadata/version.go | 9 + .../kn-plugin-event/pkg/plugin/plugin.go | 46 + .../kn-plugin-event/pkg/plugin/testing.go | 35 + .../kn-plugin-event/pkg/sender/create.go | 38 + .../kn-plugin-event/pkg/sender/direct.go | 30 + .../kn-plugin-event/pkg/sender/in_cluster.go | 65 + .../kn-plugin-event/pkg/sender/namespace.go | 13 + .../kn-plugin-event/pkg/sender/types.go | 28 + .../serving/pkg/apis/serving/fieldmask.go | 2 + .../pkg/apis/serving/k8s_validation.go | 79 +- vendor/knative.dev/serving/test/e2e-common.sh | 14 +- 
vendor/modules.txt | 112 +- 509 files changed, 36869 insertions(+), 5675 deletions(-) create mode 100644 hack/build.sh.d/README.md create mode 100644 hack/build.sh.d/kn-plugin-event.sh create mode 100644 third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/autorest/LICENSE create mode 100644 third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/autorest/adal/LICENSE create mode 100644 third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/autorest/date/LICENSE create mode 100644 third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/logger/LICENSE create mode 100644 third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/tracing/LICENSE create mode 100644 third_party/VENDOR-LICENSE/github.com/form3tech-oss/jwt-go/LICENSE create mode 100644 third_party/VENDOR-LICENSE/github.com/ghodss/yaml/LICENSE create mode 100644 third_party/VENDOR-LICENSE/github.com/klauspost/compress/internal/snapref/LICENSE create mode 100644 third_party/VENDOR-LICENSE/github.com/thediveo/enumflag/LICENSE create mode 100644 third_party/VENDOR-LICENSE/github.com/wavesoftware/go-ensure/LICENSE create mode 100644 third_party/VENDOR-LICENSE/knative.dev/kn-plugin-event/LICENSE create mode 100644 vendor/github.com/Azure/go-autorest/.gitignore create mode 100644 vendor/github.com/Azure/go-autorest/CHANGELOG.md create mode 100644 vendor/github.com/Azure/go-autorest/GNUmakefile create mode 100644 vendor/github.com/Azure/go-autorest/Gopkg.lock create mode 100644 vendor/github.com/Azure/go-autorest/Gopkg.toml create mode 100644 vendor/github.com/Azure/go-autorest/LICENSE create mode 100644 vendor/github.com/Azure/go-autorest/README.md create mode 100644 vendor/github.com/Azure/go-autorest/autorest/LICENSE create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/README.md create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/config.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/go.mod create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/go.sum create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/persist.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/sender.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/token.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/version.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/authorization.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/autorest.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/async.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/azure.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/environments.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/rp.go create mode 100644 
vendor/github.com/Azure/go-autorest/autorest/client.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/LICENSE create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/date.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/go.mod create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/go.sum create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/time.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/utility.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/error.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/go.mod create mode 100644 vendor/github.com/Azure/go-autorest/autorest/go.sum create mode 100644 vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/preparer.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/responder.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/sender.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/utility.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/utility_1.13.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/utility_legacy.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/version.go create mode 100644 vendor/github.com/Azure/go-autorest/azure-pipelines.yml create mode 100644 vendor/github.com/Azure/go-autorest/doc.go create mode 100644 vendor/github.com/Azure/go-autorest/logger/LICENSE create mode 100644 vendor/github.com/Azure/go-autorest/logger/go.mod create mode 100644 vendor/github.com/Azure/go-autorest/logger/go.sum create mode 100644 vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go create mode 100644 vendor/github.com/Azure/go-autorest/logger/logger.go create mode 100644 vendor/github.com/Azure/go-autorest/tracing/LICENSE create mode 100644 vendor/github.com/Azure/go-autorest/tracing/go.mod create mode 100644 vendor/github.com/Azure/go-autorest/tracing/go.sum create mode 100644 vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go create mode 100644 vendor/github.com/Azure/go-autorest/tracing/tracing.go create mode 100644 vendor/github.com/Shopify/sarama/alter_client_quotas_request.go create mode 100644 vendor/github.com/Shopify/sarama/alter_client_quotas_response.go create mode 100644 vendor/github.com/Shopify/sarama/delete_offsets_request.go create mode 100644 vendor/github.com/Shopify/sarama/delete_offsets_response.go create mode 100644 vendor/github.com/Shopify/sarama/describe_client_quotas_request.go create mode 100644 vendor/github.com/Shopify/sarama/describe_client_quotas_response.go create mode 100644 vendor/github.com/Shopify/sarama/quota_types.go create mode 100644 vendor/github.com/Shopify/sarama/version.go create mode 100644 vendor/github.com/form3tech-oss/jwt-go/.gitignore create mode 100644 vendor/github.com/form3tech-oss/jwt-go/.travis.yml create mode 100644 
vendor/github.com/form3tech-oss/jwt-go/LICENSE create mode 100644 vendor/github.com/form3tech-oss/jwt-go/MIGRATION_GUIDE.md create mode 100644 vendor/github.com/form3tech-oss/jwt-go/README.md create mode 100644 vendor/github.com/form3tech-oss/jwt-go/VERSION_HISTORY.md create mode 100644 vendor/github.com/form3tech-oss/jwt-go/claims.go create mode 100644 vendor/github.com/form3tech-oss/jwt-go/doc.go create mode 100644 vendor/github.com/form3tech-oss/jwt-go/ecdsa.go create mode 100644 vendor/github.com/form3tech-oss/jwt-go/ecdsa_utils.go create mode 100644 vendor/github.com/form3tech-oss/jwt-go/errors.go create mode 100644 vendor/github.com/form3tech-oss/jwt-go/hmac.go create mode 100644 vendor/github.com/form3tech-oss/jwt-go/map_claims.go create mode 100644 vendor/github.com/form3tech-oss/jwt-go/none.go create mode 100644 vendor/github.com/form3tech-oss/jwt-go/parser.go create mode 100644 vendor/github.com/form3tech-oss/jwt-go/rsa.go create mode 100644 vendor/github.com/form3tech-oss/jwt-go/rsa_pss.go create mode 100644 vendor/github.com/form3tech-oss/jwt-go/rsa_utils.go create mode 100644 vendor/github.com/form3tech-oss/jwt-go/signing_method.go create mode 100644 vendor/github.com/form3tech-oss/jwt-go/token.go create mode 100644 vendor/github.com/fsnotify/fsnotify/.mailmap delete mode 100644 vendor/github.com/fsnotify/fsnotify/.travis.yml create mode 100644 vendor/github.com/ghodss/yaml/.gitignore create mode 100644 vendor/github.com/ghodss/yaml/.travis.yml create mode 100644 vendor/github.com/ghodss/yaml/LICENSE create mode 100644 vendor/github.com/ghodss/yaml/README.md create mode 100644 vendor/github.com/ghodss/yaml/fields.go create mode 100644 vendor/github.com/ghodss/yaml/yaml.go create mode 100644 vendor/github.com/go-logr/logr/.golangci.yaml create mode 100644 vendor/github.com/go-logr/logr/CHANGELOG.md create mode 100644 vendor/github.com/go-logr/logr/CONTRIBUTING.md create mode 100644 vendor/github.com/klauspost/compress/.gitattributes create mode 100644 vendor/github.com/klauspost/compress/.gitignore create mode 100644 vendor/github.com/klauspost/compress/.goreleaser.yml create mode 100644 vendor/github.com/klauspost/compress/README.md create mode 100644 vendor/github.com/klauspost/compress/compressible.go create mode 100644 vendor/github.com/klauspost/compress/gen.sh create mode 100644 vendor/github.com/klauspost/compress/go.mod create mode 100644 vendor/github.com/klauspost/compress/go.sum create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/LICENSE create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/decode.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/decode_other.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/encode.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/encode_other.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/snappy.go create mode 100644 vendor/github.com/klauspost/compress/s2sx.mod create mode 100644 vendor/github.com/klauspost/compress/s2sx.sum create mode 100644 vendor/github.com/pierrec/lz4/writer_legacy.go delete mode 100644 vendor/github.com/spf13/cast/.travis.yml create mode 100644 vendor/github.com/spf13/cast/timeformattype_string.go create mode 100644 vendor/github.com/spf13/viper/internal/encoding/decoder.go create mode 100644 vendor/github.com/spf13/viper/internal/encoding/encoder.go create mode 100644 vendor/github.com/spf13/viper/internal/encoding/error.go create mode 100644 
vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go create mode 100644 vendor/github.com/spf13/viper/internal/encoding/json/codec.go create mode 100644 vendor/github.com/spf13/viper/internal/encoding/toml/codec.go create mode 100644 vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go create mode 100644 vendor/github.com/thediveo/enumflag/LICENSE create mode 100644 vendor/github.com/thediveo/enumflag/README.md create mode 100644 vendor/github.com/thediveo/enumflag/doc.go create mode 100644 vendor/github.com/thediveo/enumflag/enumflag.code-workspace create mode 100644 vendor/github.com/thediveo/enumflag/enumflag.go create mode 100644 vendor/github.com/thediveo/enumflag/enumslice.go create mode 100644 vendor/github.com/thediveo/enumflag/go.mod create mode 100644 vendor/github.com/thediveo/enumflag/go.sum create mode 100644 vendor/github.com/wavesoftware/go-ensure/.editorconfig create mode 100644 vendor/github.com/wavesoftware/go-ensure/.gitignore create mode 100644 vendor/github.com/wavesoftware/go-ensure/.travis.yml create mode 100644 vendor/github.com/wavesoftware/go-ensure/LICENSE create mode 100644 vendor/github.com/wavesoftware/go-ensure/Makefile create mode 100644 vendor/github.com/wavesoftware/go-ensure/README.md create mode 100644 vendor/github.com/wavesoftware/go-ensure/errors.go create mode 100644 vendor/github.com/wavesoftware/go-ensure/go.mod create mode 100644 vendor/github.com/wavesoftware/go-ensure/go.sum create mode 100644 vendor/github.com/wavesoftware/go-ensure/revive.toml create mode 100644 vendor/golang.org/x/crypto/pkcs12/bmp-string.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/crypto.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/errors.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/mac.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/pbkdf.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/pkcs12.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/safebags.go create mode 100644 vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go create mode 100644 vendor/golang.org/x/mod/module/pseudo.go create mode 100644 vendor/golang.org/x/sys/unix/sysvshm_linux.go create mode 100644 vendor/golang.org/x/sys/unix/sysvshm_unix.go create mode 100644 vendor/golang.org/x/sys/unix/sysvshm_unix_other.go create mode 100644 vendor/golang.org/x/sys/windows/setupapi_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/setupapierrors_windows.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/normalize.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/termlist.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeterm.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/regex.go delete mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/target.go create mode 100644 vendor/google.golang.org/grpc/resolver/map.go create mode 100644 vendor/gopkg.in/ini.v1/.golangci.yml create mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/README.md create mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/azure.go create mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/openstack/openstack_stub.go create mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins.go create mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins_providers.go create mode 100644 
vendor/k8s.io/klog/v2/klog_file_others.go create mode 100644 vendor/k8s.io/klog/v2/klog_file_windows.go create mode 100644 vendor/knative.dev/kn-plugin-event/LICENSE create mode 100644 vendor/knative.dev/kn-plugin-event/internal/cli/cmd/build.go create mode 100644 vendor/knative.dev/kn-plugin-event/internal/cli/cmd/builder.go create mode 100644 vendor/knative.dev/kn-plugin-event/internal/cli/cmd/root.go create mode 100644 vendor/knative.dev/kn-plugin-event/internal/cli/cmd/send.go create mode 100644 vendor/knative.dev/kn-plugin-event/internal/cli/cmd/testing.go create mode 100644 vendor/knative.dev/kn-plugin-event/internal/cli/cmd/types.go create mode 100644 vendor/knative.dev/kn-plugin-event/internal/cli/cmd/version.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/cli/create.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/cli/ics/encoding.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/cli/ics/send.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/cli/ics/types.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/cli/options.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/cli/retcode/retcode.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/cli/send.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/cli/target.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/cli/types.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/cli/version.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/configuration/cli.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/configuration/defaults.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/configuration/ics.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/configuration/memoized.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/event/constants.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/event/create.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/event/sender.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/event/types.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/k8s/addressresolver.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/k8s/errors.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/k8s/jobrunner.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/k8s/kubeclient.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/metadata/image.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/metadata/names.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/metadata/reflect.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/metadata/version.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/plugin/plugin.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/plugin/testing.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/sender/create.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/sender/direct.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/sender/in_cluster.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/sender/namespace.go create mode 100644 vendor/knative.dev/kn-plugin-event/pkg/sender/types.go diff --git a/go.mod b/go.mod index a8c0fd3e03..c317933f58 100644 --- a/go.mod +++ b/go.mod @@ -8,20 +8,26 @@ require ( github.com/mitchellh/go-homedir v1.1.0 github.com/spf13/cobra v1.2.1 github.com/spf13/pflag v1.0.5 - github.com/spf13/viper v1.8.1 + github.com/spf13/viper v1.9.0 golang.org/x/term 
v0.0.0-20210220032956-6a3ed077a48d gotest.tools/v3 v3.0.3 k8s.io/api v0.21.4 k8s.io/apiextensions-apiserver v0.21.4 - k8s.io/apimachinery v0.21.4 + k8s.io/apimachinery v0.22.3 k8s.io/cli-runtime v0.21.4 k8s.io/client-go v0.21.4 k8s.io/code-generator v0.21.4 - knative.dev/eventing v0.27.0 - knative.dev/hack v0.0.0-20211101195839-11d193bf617b + knative.dev/eventing v0.27.2 + knative.dev/hack v0.0.0-20211122163517-fe1340f21191 + knative.dev/kn-plugin-event v0.27.1 knative.dev/kn-plugin-source-kafka v0.27.0 knative.dev/networking v0.0.0-20211101215640-8c71a2708e7d knative.dev/pkg v0.0.0-20211101212339-96c0204a70dc - knative.dev/serving v0.27.0 + knative.dev/serving v0.27.1 sigs.k8s.io/yaml v1.3.0 ) + +replace ( + k8s.io/apimachinery => k8s.io/apimachinery v0.21.4 + knative.dev/kn-plugin-event => github.com/openshift-knative/kn-plugin-event v0.27.1-0.20220223114256-af13ecf492aa +) diff --git a/go.sum b/go.sum index 08b5c6c80f..f677564f18 100644 --- a/go.sum +++ b/go.sum @@ -26,8 +26,9 @@ cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWc cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.98.0 h1:w6LozQJyDDEyhf64Uusu1LCcnLt0I1VMLiJC2kV+eXk= +cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -37,6 +38,7 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -69,6 +71,7 @@ github.com/Azure/go-amqp v0.13.0/go.mod h1:qj+o8xPCz9tMSbQ83Vp8boHahuRDl5mkNHyt1 github.com/Azure/go-amqp v0.13.1/go.mod h1:qj+o8xPCz9tMSbQ83Vp8boHahuRDl5mkNHyt1xlxUTs= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= @@ -76,6 +79,7 @@ github.com/Azure/go-autorest/autorest v0.9.6/go.mod 
h1:/FALq9T/kS7b5J5qsQ+RSTUdA github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest v0.11.3/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest v0.11.17 h1:2zCdHwNgRH+St1J+ZMf66xI8aLr/5KMy+wWLH97zwYM= github.com/Azure/go-autorest/autorest v0.11.17/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= @@ -83,16 +87,19 @@ github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMl github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/adal v0.9.10 h1:r6fZHMaHD8B6LDCn0o5vyBFHIHrM6Ywwx7mb49lPItI= github.com/Azure/go-autorest/autorest/adal v0.9.10/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= @@ -102,10 +109,13 @@ github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRY github.com/Azure/go-autorest/autorest/validation v0.3.0/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= 
+github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20200415212048-7901bc822317/go.mod h1:DF8FZRxMHMGv/vP2lQP6h+dYzzjpuRn24VeRiYn3qjQ= @@ -119,6 +129,7 @@ github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JP github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= @@ -126,6 +137,7 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3 github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -142,10 +154,13 @@ github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:H github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/sarama v1.23.1/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs= github.com/Shopify/sarama v1.25.0/go.mod h1:y/CFFTO9eaMTNriwu/Q+W4eioLqiDMGkA1W+gmdfj8w= -github.com/Shopify/sarama v1.29.1 h1:wBAacXbYVLmWieEA/0X/JagDdCZ8NVFOfS6l6+2u5S0= github.com/Shopify/sarama v1.29.1/go.mod h1:mdtqvCSg8JOxk8PmpTNGyo6wzd4BMm4QXSfDnTXmgkE= +github.com/Shopify/sarama v1.30.0 h1:TOZL6r37xJBDEMLx4yjB77jxbZYXPaDow08TSK6vIL0= +github.com/Shopify/sarama v1.30.0/go.mod h1:zujlQQx1kzHsh4jfV1USnptCQrHAEZ2Hk8fTKCulPVs= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/Shopify/toxiproxy/v2 v2.1.6-0.20210914104332-15ea381dcdae h1:ePgznFqEG1v3AjMklnK8H7BSc++FDSo7xfK9K7Af+0Y= +github.com/Shopify/toxiproxy/v2 v2.1.6-0.20210914104332-15ea381dcdae/go.mod h1:/cvHQkZ1fst0EmZnA5dFtiQdWCNCFYzb+uE2vqVgvx0= 
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= @@ -157,6 +172,7 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -166,6 +182,7 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= @@ -187,6 +204,7 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= @@ -215,6 +233,7 @@ github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tj github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline 
v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -223,6 +242,7 @@ github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmE github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudevents/conformance v0.2.0/go.mod h1:rHKDwylBH89Rns6U3wL9ww8bg9/4GbwRCDNuyoC6bcc= @@ -234,8 +254,11 @@ github.com/cloudevents/sdk-go/v2 v2.4.1/go.mod h1:MZiMwmAh5tGj+fPFvtHv9hKurKqXtd github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= @@ -269,7 +292,9 @@ github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7 github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= +github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= github.com/containerd/containerd v1.5.2/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= +github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -300,6 +325,8 @@ github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3 github.com/containerd/stargz-snapshotter/estargz v0.0.0-20201223015020-a9a0c2d64694/go.mod h1:E9uVkkBKf0EaC39j2JVW9EzdNhYvpz6eQIjILHebruk= 
github.com/containerd/stargz-snapshotter/estargz v0.6.4/go.mod h1:83VWDqHnurTKliEB0YvWMiCfLDwv4Cjj1X9Vk98GJZw= github.com/containerd/stargz-snapshotter/estargz v0.7.0/go.mod h1:83VWDqHnurTKliEB0YvWMiCfLDwv4Cjj1X9Vk98GJZw= +github.com/containerd/stargz-snapshotter/estargz v0.8.0/go.mod h1:mwIwuwb+D8FX2t45Trwi0hmWmZm5VW7zPP/rekwhWQU= +github.com/containerd/stargz-snapshotter/estargz v0.9.0/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= @@ -342,8 +369,9 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -352,6 +380,7 @@ github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1S github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= +github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -368,13 +397,17 @@ github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyG github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v20.10.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v20.10.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v20.10.10+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod 
h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.8+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.10+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= @@ -383,8 +416,8 @@ github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHz github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dprotaso/go-yit v0.0.0-20191028211022-135eb7262960/go.mod h1:9HQzr9D/0PGwMEbC3d5AB7oi67+h4TsQqItC1GVYG58= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -414,9 +447,14 @@ github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMi github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.5.0 h1:bAmFiUJ+o0o2B4OiTFeE3MqCOtyo+jjPP9iZ0VRxYUc= +github.com/evanphx/json-patch/v5 v5.2.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= +github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod 
h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= @@ -427,11 +465,13 @@ github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= @@ -452,8 +492,10 @@ github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -515,6 +557,8 @@ github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-training/helloworld v0.0.0-20200225145412-ba5f4379d78b/go.mod h1:hGGmX3bRUkYkc9aKA6mkUxi6d+f1GmZF1je0FlVTgwU= github.com/gobuffalo/flect v0.2.2/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc= github.com/gobuffalo/flect v0.2.3/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc= github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= @@ -572,8 +616,9 @@ github.com/golang/protobuf v1.5.2 
h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc= github.com/gonum/diff v0.0.0-20181124234638-500114f11e71/go.mod h1:22dM4PLscQl+Nzf64qNBurVJvfyvZELT0iRW2l/NN70= github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg= @@ -611,6 +656,7 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/ko v0.9.3/go.mod h1:Q6jYoY4SzKhRTaniQgnNoLaj4e4fVIWTBLK4FTJQPtM= github.com/google/licenseclassifier v0.0.0-20200708223521-3d09a0ea2f39/go.mod h1:qsqn2hxC+vURpyBRygGUuinTO42MFRLcsmQ/P8v94+M= github.com/google/mako v0.0.0-20190821191249-122f8dcef9e3/go.mod h1:YzLcVlL+NqWnmUEPuhS1LxDDwGO9WNbVlEXaF4IH35g= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -648,7 +694,6 @@ github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3i github.com/googleapis/gnostic v0.5.3 h1:2qsuRm+bzgwSIKikigPASa2GhW8H2Dn4Qq7UxD8K/48= github.com/googleapis/gnostic v0.5.3/go.mod h1:TRWw1s4gxBGjSe301Dai3c7wXJAZy57+/6tawkOvqHQ= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= @@ -656,6 +701,7 @@ github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -675,18 +721,25 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod 
h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.6.7/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -704,8 +757,11 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -714,8 +770,9 @@ github.com/imdario/mergo 
v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= @@ -746,6 +803,7 @@ github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHW github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/joho/godotenv v1.4.0/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= @@ -759,7 +817,6 @@ github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMW github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= @@ -775,8 +832,10 @@ github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.13.0 h1:2T7tUoQrQT+fQWdaY5rjWztFGAFwbGD04iPJg90ZiOs= github.com/klauspost/compress v1.13.0/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -797,6 +856,7 @@ github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-b github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lightstep/tracecontext.go v0.0.0-20181129014701-1757c391b1ac/go.mod h1:Frd2bnT3w5FB5q49ENTfVlztJES+1k/7lyWX2+9gq/M= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magefile/mage v1.11.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= @@ -813,9 +873,20 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/markbates/pkger v0.17.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/mattmoor/dep-notify v0.0.0-20190205035814-a45dec370a17/go.mod h1:qWnF4u+oS4UWOZMwZcBQXrt5IQIdWc6XVJLDdxGIfdQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= @@ -828,10 +899,12 @@ github.com/maximilien/kn-source-pkg v0.6.3 h1:5S9jgof8KSvde/hrM0VaOMZ6LmBPTIMO05 github.com/maximilien/kn-source-pkg v0.6.3/go.mod h1:3Nym+ahhV18lkHutauJdw1r3PmlDfu/DoTUuF8ySMt4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.17/go.mod h1:WgzbA6oji13JREwiNsRDNfl7jYdPnmz+VEuLrA+/48M= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.29/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= 
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -842,8 +915,9 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo= +github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= @@ -876,8 +950,9 @@ github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= @@ -893,8 +968,10 @@ github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.2 h1:8mVmC9kjFFmA8H4pKMUhcblgifdkOIXPvbhN1T36q1M= github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega 
v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -905,8 +982,9 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= -github.com/onsi/gomega v1.10.4 h1:NiTx7EEvBzu9sFOD1zORteLSt3o8gnlvZZwSE9TnY9U= github.com/onsi/gomega v1.10.4/go.mod h1:g/HbgYopi++010VEqkFgJHKC09uJiW9UkXvMUuKHUCQ= +github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c= +github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -915,19 +993,25 @@ github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go. github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.2-0.20210730191737-8e42a01fb1b7/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= 
+github.com/openshift-knative/kn-plugin-event v0.27.1-0.20220223114256-af13ecf492aa h1:Zmaj3Ia2pRoN4/HqbiDu9TBkxoPk7/9h6R53OGtOAlA= +github.com/openshift-knative/kn-plugin-event v0.27.1-0.20220223114256-af13ecf492aa/go.mod h1:z/RbZ2zAWlirxAvKNNikas7S19P8V1nr8XXG0cXr3M0= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -936,28 +1020,32 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.5 h1:UwtQQx2pyPIgWYHRg+epgdx1/HnBQTgN3/oIYEJTQzU= github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= +github.com/openzipkin/zipkin-go v0.3.0 h1:XtuXmOLIXLjiU2XduuWREDT0LOKtSgos/g7i7RYyoZQ= +github.com/openzipkin/zipkin-go v0.3.0/go.mod h1:4c3sLeE8xjNqehmF5RpAFLPLJxXscc0R4l6Zg0P1tTQ= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= -github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= +github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.0.0-beta.2/go.mod h1:+X+aW6gUj6Hda43TeYHVCIvYNG/jqY/8ZFXAeXXHl+Q= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc= github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A= 
github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= +github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -968,6 +1056,7 @@ github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -1000,8 +1089,9 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8 github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.31.1 h1:d18hG4PkHnNAKNMOmFuXFaiY8Us0nird/2m60uS1AMs= github.com/prometheus/common v0.31.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1020,6 +1110,7 @@ github.com/prometheus/statsd_exporter v0.15.0/go.mod h1:Dv8HnkoLQkeEjkIE4/2ndAA7 github.com/prometheus/statsd_exporter v0.21.0 h1:hA05Q5RFeIjgwKIYEdFd59xu5Wwaznf33yKI+pyX6T8= github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rabbitmq/amqp091-go v1.1.0/go.mod h1:ogQDLSOACsLPsIq0NpbtiifNZi2YOz0VTJ0kHRghqbM= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= @@ -1043,6 +1134,7 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize 
v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= @@ -1063,10 +1155,8 @@ github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/slinkydeveloper/loadastic v0.0.0-20201218203601-5c69eea3b7d8/go.mod h1:vNSJ7ak+2OeAb5bhSHRmMKKcbadbxlokx/Wh1lC50e4= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v1.0.0 h1:UVQPSSmc3qtTi+zPPkCXvZX9VvW/xT/NsRvKfwY81a8= github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= @@ -1076,11 +1166,13 @@ github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTd github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= +github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= @@ -1099,8 +1191,9 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= 
+github.com/spf13/viper v1.9.0 h1:yR6EXjTp0y0cLN8OZg1CRZmOBdI88UcGkhgyJhu6nZk= +github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4= github.com/sqs/goreturns v0.0.0-20181028201513-538ac6014518/go.mod h1:CKI4AZ4XmGV240rTHfO0hfE83S6/a3/Q1siZJ/vXf7A= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= @@ -1127,6 +1220,8 @@ github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/thediveo/enumflag v0.10.0 h1:CQIGWQYgFIKTUvRf5FL3K5ksQzzBag/HSUCu3Oin+Jw= +github.com/thediveo/enumflag v0.10.0/go.mod h1:zbsjXNJ3Np5ZXjUDgXeXsF2THwQd6ybEKMLw9XnQZ8o= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -1140,8 +1235,11 @@ github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= github.com/vdemeester/k8s-pkg-credentialprovider v1.19.7/go.mod h1:K2nMO14cgZitdwBqdQps9tInJgcaXcU/7q5F59lpbNI= github.com/vdemeester/k8s-pkg-credentialprovider v1.21.0-1/go.mod h1:l4LxiP0cmEcc5q4BTDE8tZSyIiyXe0T28x37yHpMzoM= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= @@ -1152,7 +1250,9 @@ github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmF github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= +github.com/wavesoftware/go-ensure v1.0.0 h1:6X3gQL5psBWwtu/H9a+69xQ+JGTUELaLhgOB/iB3AQk= github.com/wavesoftware/go-ensure v1.0.0/go.mod h1:K2UAFSwMTvpiRGay/M3aEYYuurcR8S4A6HkQlJPV8k4= +github.com/wavesoftware/go-magetasks v0.6.0/go.mod h1:8/zmKLAbJFwjAZOF8U2BnmVa9aDrLPtd4XO5b67uflY= github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/xdg-go/pbkdf2 v1.0.0/go.mod 
h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= @@ -1175,6 +1275,7 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= @@ -1246,6 +1347,7 @@ golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1256,8 +1358,9 @@ golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210920023735-84f357641f63 h1:kETrAMYZq6WVGPa8IIixL0CaEcIUNi+1WX7grUoi3y8= +golang.org/x/crypto v0.0.0-20210920023735-84f357641f63/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1296,8 +1399,9 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38= +golang.org/x/mod 
v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1355,12 +1459,15 @@ golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f h1:OfiFi4JbukWwe3lzw+xunroH1mnC1e2Gy5cxNJApiSY= +golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211020060615-d418f374d309 h1:A0lJIi+hcTR6aajJH4YqKWwohY4aW9RO7oRMcdv+HKI= +golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1379,8 +1486,9 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1 h1:B333XXssMuKQeBwiNODx4TupZy7bf4sxFZnN2ZOcvUE= golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1404,6 +1512,7 @@ golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1425,10 +1534,12 @@ golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1476,6 +1587,7 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1499,8 +1611,12 @@ golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210917161153-d61c044b1678 h1:J27LZFQBFoihqXoegpscI10HpjZ7B5WQLLKL2FZXQKw= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881 h1:TyHqChC80pFkXWraUUf6RuB5IqFdQieMLwwCJokV2pc= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1513,8 +1629,9 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1595,6 +1712,7 @@ golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= @@ -1603,8 +1721,10 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ= +golang.org/x/tools v0.1.6/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= +golang.org/x/tools v0.1.8 h1:P1HhGGuLW4aAclzjtmJdf0mJOjVUZUzOTqkAkWL+l6w= +golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1654,8 +1774,9 @@ google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6 google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.58.0 h1:MDkAbYIB1JpSgCTOCYYoIec/coMlKK4oVbpnBLLcyT0= google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E= +google.golang.org/api v0.61.0 h1:TXXKS1slM3b2bZNJwD5DV/Tp6/M2cLzLOLh9PjDhrw8= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1730,8 +1851,11 @@ google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211016002631-37fc39342514 h1:Rp1vYDPD4TdkMH5S/bZbopsGCsWhPcrLBUwOVhAQCxM= google.golang.org/genproto v0.0.0-20211016002631-37fc39342514/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211027162914-98a5263abeca/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12 h1:DN5b3HU13J4sMd/QjDx34U6afpaexKTDdop+26pdjdk= +google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -1764,8 +1888,9 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/grpc v1.42.0 h1:XT2/MFpuPFsEX2fWh3YQtHkZ+WYZFQRfaUgLZYj/p6A= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod 
h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1803,8 +1928,9 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.56.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.63.2 h1:tGK/CyBg7SMzb60vP1M03vNZ3VDu3wGQJwn7Sxi9r3c= +gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= @@ -1830,6 +1956,7 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20191026110619-0b21df46bc1d/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= @@ -1859,12 +1986,6 @@ k8s.io/api v0.21.4/go.mod h1:fTVGP+M4D8+00FN2cMnJqk/eb/GH53bvmNs2SVTmpFk= k8s.io/apiextensions-apiserver v0.19.7/go.mod h1:XJNNtjISNNePDEUClHt/igzMpQcmjVVh88QH+PKztPU= k8s.io/apiextensions-apiserver v0.21.4 h1:HkajN/vmT/9HnFmUxvpXfSGkTCvH/ax4e3+j6mqWUDU= k8s.io/apiextensions-apiserver v0.21.4/go.mod h1:OoC8LhI9LnV+wKjZkXIBbLUwtnOGJiTRE33qctH5CIk= -k8s.io/apimachinery v0.19.7/go.mod h1:6sRbGRAVY5DOCuZwB5XkqguBqpqLU6q/kOaOdk29z6Q= -k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= -k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= -k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= k8s.io/apimachinery v0.21.4 h1:KDq0lWZVslHkuE5I7iGAQHwpK0aDTlar1E7IWEc4CNw= k8s.io/apimachinery v0.21.4/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI= k8s.io/apiserver v0.19.7/go.mod h1:DmWVQggNePspa+vSsVytVbS3iBSDTXdJVt0akfHacKk= @@ -1916,8 +2037,10 @@ k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.5.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.40.1 
h1:P4RRucWk/lFOlDdkAr3mc7iWFkgKrZY9qZMAgek06S4= +k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20210113233702-8566a335510f/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= @@ -1936,14 +2059,18 @@ knative.dev/client v0.21.0/go.mod h1:1En9uxMhk62EReWR1d66/d3tnpkot/D3vBRfmuidFNc knative.dev/client v0.27.0/go.mod h1:FmXziLwolJtqaH3Jxe8cxqWjBUHXU6LsRsMyzlMSFFM= knative.dev/control-protocol v0.0.0-20211101215039-a32569595694/go.mod h1:HlIoGfErGnSVKN6NI3iG5gXxCo9EcdV8sCDGW2IObJo= knative.dev/eventing v0.21.0/go.mod h1:JjbVEOTJJHqo9CTxbTfrMn018hG8fOr3UfBoCJ7KWaA= -knative.dev/eventing v0.27.0 h1:P9rqKvsCeFb8sEhfK32WFTG7awdOAa7XO6p3umSq/wU= knative.dev/eventing v0.27.0/go.mod h1:4ppWQEQ/2B66/YFENDmV1Gjxs4meLpz6UTUgLkkINt4= +knative.dev/eventing v0.27.2 h1:Zslo5WhIxAHAztLjoSCcnW52SNO2hIumK9dMp9V/Zc8= +knative.dev/eventing v0.27.2/go.mod h1:k/Xt54hNh9YfMSYdMTkJ0TNrkwU9hL+Frn+n9zsAdaE= knative.dev/eventing-kafka v0.27.0 h1:mfbQbK+g9PiBHL9FoLTNckQRvY4EzMfmR2z25zTKJeU= knative.dev/eventing-kafka v0.27.0/go.mod h1:bt9HdIsI9RMgiWfcsrDT2vH8ADGY+ehN4a6Bj4QH5Jo= knative.dev/hack v0.0.0-20210203173706-8368e1f6eacf/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI= -knative.dev/hack v0.0.0-20211101195839-11d193bf617b h1:DaW1iliZlBAwq/I8gTqVu8UnfGxyb5yR7CDsJi5jyWk= knative.dev/hack v0.0.0-20211101195839-11d193bf617b/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI= +knative.dev/hack v0.0.0-20211112152736-4de3924914b2/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI= +knative.dev/hack v0.0.0-20211122163517-fe1340f21191 h1:yRUruaFxjk6tkubtg44Nya0phUm1idKZr5bFO30AkmE= +knative.dev/hack v0.0.0-20211122163517-fe1340f21191/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI= knative.dev/hack/schema v0.0.0-20211101195839-11d193bf617b/go.mod h1:ffjwmdcrH5vN3mPhO8RrF2KfNnbHeCE2C60A+2cv3U0= +knative.dev/hack/schema v0.0.0-20211122163517-fe1340f21191/go.mod h1:ffjwmdcrH5vN3mPhO8RrF2KfNnbHeCE2C60A+2cv3U0= knative.dev/kn-plugin-source-kafka v0.27.0 h1:zOxYTlm/BuT6R7gSBgEVbSMFVgt0XfBKJlP1PM3mjks= knative.dev/kn-plugin-source-kafka v0.27.0/go.mod h1:lo7J8MK+NZAqdy0y7DsUlbVLizZNf37fEL+vOOBNw2Y= knative.dev/networking v0.0.0-20210215030235-088986a1c2a3/go.mod h1:pmAMQjMqQUxpK0UyjE71KljMs6rwDMVIAlvrZsU3I6Y= @@ -1958,8 +2085,9 @@ knative.dev/pkg v0.0.0-20211101212339-96c0204a70dc/go.mod h1:SkfDk9bWIiNZD7XtILG knative.dev/reconciler-test v0.0.0-20210216030508-77f50054d024/go.mod h1:RP/K5xJylB72Go6eAsXYEsQHp4zCCNMNjmsqhvq7wko= knative.dev/reconciler-test v0.0.0-20211101214439-9839937c9b13/go.mod h1:gTsbLk496j/M9xqMpx/liyCQ0X3bwDpRtcs2Zzws364= knative.dev/serving v0.21.0/go.mod h1:PU9k1Y6YMG27XQldEu5agNkcebvSafUXKXPircQYCsE= -knative.dev/serving v0.27.0 h1:pstGHQ8aSJsIwdofkjJV6TrmPN09u/2qN1Mgfc92LQs= knative.dev/serving v0.27.0/go.mod h1:Lcqv3K/DeDt6ZoeWTgpjxc/0teS7uMebnnU+gXIw99c= +knative.dev/serving v0.27.1 h1:d3cazLiXkvFIcqPYGlV+qvGkMdqb5yI6kiw0FovC4dc= +knative.dev/serving v0.27.1/go.mod h1:70KmpwlBztk82pHmwRUkPGasQChFmefUyZWMDOzEoKk= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= @@ -1973,6 +2101,7 @@ 
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQb sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/kind v0.11.1/go.mod h1:fRpgVhtqAWrtLB9ED7zQahUimpUXuG/iHT88xYqEGIA= sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0= sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= sigs.k8s.io/kustomize/api v0.8.8 h1:G2z6JPSSjtWWgMeWSoHdXqyftJNmMmyxXpwENGoOtGE= diff --git a/hack/build-flags.sh b/hack/build-flags.sh index 669c8ec8a3..c02eca058a 100644 --- a/hack/build-flags.sh +++ b/hack/build-flags.sh @@ -14,20 +14,30 @@ function build_flags() { local base="${1}" - local now="$(date -u '+%Y-%m-%d %H:%M:%S')" - local rev="$(git rev-parse --short HEAD)" + local now rev + now="$(date -u '+%Y-%m-%d %H:%M:%S')" + rev="$(git rev-parse --short HEAD)" local pkg="knative.dev/client/pkg/kn/commands/version" local version="${TAG:-}" # Use vYYYYMMDD-local- for the version string, if not passed. if [[ -z "${version}" ]]; then # Get the commit, excluding any tags but keeping the "dirty" flag - local commit="$(git describe --always --dirty --match '^$')" + local commit + commit="$(git describe --always --dirty --match '^$')" [[ -n "${commit}" ]] || abort "error getting the current commit" version="v$(date +%Y%m%d)-local-${commit}" fi # Extract Eventing and Serving versions from go.mod - local version_serving=$(cat go.mod | grep "knative.dev/serving " | sed -s 's/.* \(v.[\.0-9]*\).*/\1/') - local version_eventing=$(cat go.mod | grep "knative.dev/eventing " | sed -s 's/.* \(v.[\.0-9]*\).*/\1/') + local version_serving version_eventing + version_serving=$(grep "knative.dev/serving " "${base}/go.mod" \ + | sed -s 's/.* \(v.[\.0-9]*\).*/\1/') + version_eventing=$(grep "knative.dev/eventing " "${base}/go.mod" \ + | sed -s 's/.* \(v.[\.0-9]*\).*/\1/') - echo "-X '${pkg}.BuildDate=${now}' -X ${pkg}.Version=${version} -X ${pkg}.GitRevision=${rev} -X ${pkg}.VersionServing=${version_serving} -X ${pkg}.VersionEventing=${version_eventing}" + echo "-X '${pkg}.BuildDate=${now}' \ + -X ${pkg}.Version=${version} \ + -X ${pkg}.GitRevision=${rev} \ + -X ${pkg}.VersionServing=${version_serving} \ + -X ${pkg}.VersionEventing=${version_eventing}\ + ${EXTERNAL_LD_FLAGS:-}" } diff --git a/hack/build.sh b/hack/build.sh index 76817d0eed..9369c109d2 100755 --- a/hack/build.sh +++ b/hack/build.sh @@ -128,7 +128,7 @@ source_lint() { go_build() { echo "🚧 Compile" - go build -mod=vendor -ldflags "$(build_flags $(basedir))" -o kn ./cmd/... + go build -mod=vendor -ldflags "$(build_flags "$(basedir)")" -o kn ./cmd/... 
if $(file kn | grep -q -i "Windows"); then mv kn kn.exe @@ -136,7 +136,8 @@ go_build() { } go_test() { - local test_output=$(mktemp /tmp/kn-client-test-output.XXXXXX) + local test_output + test_output="$(mktemp /tmp/kn-client-test-output.XXXXXX)" local red="" local reset="" @@ -164,7 +165,8 @@ check_license() { local required_keywords=("Authors" "Apache License" "LICENSE-2.0") local extensions_to_check=("sh" "go" "yaml" "yml" "json") - local check_output=$(mktemp /tmp/kn-client-licence-check.XXXXXX) + local check_output + check_output="$(mktemp /tmp/kn-client-licence-check.XXXXXX)" for ext in "${extensions_to_check[@]}"; do find . -name "*.$ext" -a \! -path "./vendor/*" -a \! -path "./.*" -a \! -path "./third_party/*" -print0 | while IFS= read -r -d '' path; do @@ -242,9 +244,10 @@ basedir() { fi fi - local dir=$(dirname "$script") - local full_dir=$(cd "${dir}/.." && pwd) - echo ${full_dir} + local dir full_dir + dir=$(dirname "$script") + full_dir=$(cd "${dir}/.." && pwd) + echo "${full_dir}" } # Checks if a flag is present in the arguments. @@ -262,8 +265,9 @@ has_flag() { } cross_build() { - local basedir=$(basedir) - local ld_flags="$(build_flags $basedir)" + local basedir ld_flags + basedir=$(basedir) + ld_flags="$(build_flags $basedir)" local failed=0 echo "⚔️ ${S}Compile" @@ -353,9 +357,12 @@ fi source $(basedir)/vendor/knative.dev/hack/library.sh # Shared funcs with CI +while IFS= read -r -d '' file; do + source "${file}" +done < <(find "$(basedir)/hack/build.sh.d" -name '*.sh' -print0) source $(basedir)/hack/build-flags.sh # Fix emoji labels for certain terminals apply_emoji_fixes -run $* +run "$@" diff --git a/hack/build.sh.d/README.md b/hack/build.sh.d/README.md new file mode 100644 index 0000000000..de123ab660 --- /dev/null +++ b/hack/build.sh.d/README.md @@ -0,0 +1,4 @@ +# Extending build.sh + +This directory contains shell scripts named `*.sh` that can be sourced by the +build script and used to extend or override it. diff --git a/hack/build.sh.d/kn-plugin-event.sh b/hack/build.sh.d/kn-plugin-event.sh new file mode 100644 index 0000000000..6214926777 --- /dev/null +++ b/hack/build.sh.d/kn-plugin-event.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +knEventVersion="$(grep 'knative.dev/kn-plugin-event ' "$(basedir)/go.mod" \ + | head -n 1 \ + | sed -s 's/.* \(v.[\.0-9]*\).*/\1/')" +knEventRelease="${knEventVersion%.*}" +knEventRelease="${knEventRelease#v}" +readonly default_sender_image="registry.ci.openshift.org/knative/release-${knEventRelease}:client-plugin-event-sender" +readonly sender_image="${KN_PLUGIN_EVENT_SENDER_IMAGE:-${default_sender_image}}" +export EXTERNAL_LD_FLAGS="${EXTERNAL_LD_FLAGS:-} \ +-X knative.dev/kn-plugin-event/pkg/metadata.Image=${sender_image} \ +-X knative.dev/kn-plugin-event/pkg/metadata.Version=${knEventVersion}" diff --git a/pkg/kn/root/plugin_register.go b/pkg/kn/root/plugin_register.go index 8eca53f722..e7956794b3 100644 --- a/pkg/kn/root/plugin_register.go +++ b/pkg/kn/root/plugin_register.go @@ -18,6 +18,7 @@ package root import ( // Add #plugins# import here. Don't remove this line, it triggers an automatic replacement.
+ _ "knative.dev/kn-plugin-event/pkg/plugin" _ "knative.dev/kn-plugin-source-kafka/plugin" ) diff --git a/third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/autorest/LICENSE b/third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/autorest/LICENSE new file mode 100644 index 0000000000..b9d6a27ea9 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/autorest/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/autorest/adal/LICENSE b/third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/autorest/adal/LICENSE new file mode 100644 index 0000000000..b9d6a27ea9 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/autorest/adal/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/autorest/date/LICENSE b/third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/autorest/date/LICENSE new file mode 100644 index 0000000000..b9d6a27ea9 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/autorest/date/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/logger/LICENSE b/third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/logger/LICENSE new file mode 100644 index 0000000000..b9d6a27ea9 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/logger/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/tracing/LICENSE b/third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/tracing/LICENSE new file mode 100644 index 0000000000..b9d6a27ea9 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/tracing/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/github.com/form3tech-oss/jwt-go/LICENSE b/third_party/VENDOR-LICENSE/github.com/form3tech-oss/jwt-go/LICENSE new file mode 100644 index 0000000000..df83a9c2f0 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/form3tech-oss/jwt-go/LICENSE @@ -0,0 +1,8 @@ +Copyright (c) 2012 Dave Grijalva + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/third_party/VENDOR-LICENSE/github.com/ghodss/yaml/LICENSE b/third_party/VENDOR-LICENSE/github.com/ghodss/yaml/LICENSE new file mode 100644 index 0000000000..7805d36de7 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/ghodss/yaml/LICENSE @@ -0,0 +1,50 @@ +The MIT License (MIT) + +Copyright (c) 2014 Sam Ghods + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +Copyright (c) 2012 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/third_party/VENDOR-LICENSE/github.com/klauspost/compress/LICENSE b/third_party/VENDOR-LICENSE/github.com/klauspost/compress/LICENSE index 1eb75ef68e..87d5574777 100644 --- a/third_party/VENDOR-LICENSE/github.com/klauspost/compress/LICENSE +++ b/third_party/VENDOR-LICENSE/github.com/klauspost/compress/LICENSE @@ -26,3 +26,279 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ + +Files: gzhttp/* + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2017 The New York Times Company + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ + +Files: s2/cmd/internal/readahead/* + +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------- +Files: snappy/* +Files: internal/snapref/* + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +----------------- + +Files: s2/cmd/internal/filepathx/* + +Copyright 2016 The filepathx Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/third_party/VENDOR-LICENSE/github.com/klauspost/compress/internal/snapref/LICENSE b/third_party/VENDOR-LICENSE/github.com/klauspost/compress/internal/snapref/LICENSE new file mode 100644 index 0000000000..6050c10f4c --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/klauspost/compress/internal/snapref/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/third_party/VENDOR-LICENSE/github.com/thediveo/enumflag/LICENSE b/third_party/VENDOR-LICENSE/github.com/thediveo/enumflag/LICENSE new file mode 100644 index 0000000000..d9a10c0d8e --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/thediveo/enumflag/LICENSE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/third_party/VENDOR-LICENSE/github.com/wavesoftware/go-ensure/LICENSE b/third_party/VENDOR-LICENSE/github.com/wavesoftware/go-ensure/LICENSE new file mode 100644 index 0000000000..6c180a6f23 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/wavesoftware/go-ensure/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020 Wave Software + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/knative.dev/kn-plugin-event/LICENSE b/third_party/VENDOR-LICENSE/knative.dev/kn-plugin-event/LICENSE new file mode 100644 index 0000000000..059b110a18 --- /dev/null +++ b/third_party/VENDOR-LICENSE/knative.dev/kn-plugin-event/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020 Knative CLI Event Contributors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index b6e1f7b614..5dbe77cc79 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -323,7 +323,7 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) { break } if reqErr != nil { - return "", "", nil + return "", "", reqErr } defer res.Body.Close() if res.StatusCode == http.StatusNotFound { diff --git a/vendor/github.com/Azure/go-autorest/.gitignore b/vendor/github.com/Azure/go-autorest/.gitignore new file mode 100644 index 0000000000..3350aaf706 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/.gitignore @@ -0,0 +1,32 @@ +# The standard Go .gitignore file follows. (Sourced from: github.com/github/gitignore/master/Go.gitignore) +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +.DS_Store +.idea/ +.vscode/ + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# go-autorest specific +vendor/ +autorest/azure/example/example diff --git a/vendor/github.com/Azure/go-autorest/CHANGELOG.md b/vendor/github.com/Azure/go-autorest/CHANGELOG.md new file mode 100644 index 0000000000..d1f596bfc9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/CHANGELOG.md @@ -0,0 +1,1004 @@ +# CHANGELOG + +## v14.2.0 + +- Added package comment to make `github.com/Azure/go-autorest` importable. + +## v14.1.1 + +### Bug Fixes + +- Change `x-ms-authorization-auxiliary` header value separator to comma. + +## v14.1.0 + +### New Features + +- Added `azure.SetEnvironment()` that will update the global environments map with the specified values. + +## v14.0.1 + +### Bug Fixes + +- Fix race condition when refreshing token. +- Fixed some tests to work with Go 1.14. + +## v14.0.0 + +## Breaking Changes + +- By default, the `DoRetryForStatusCodes` functions will no longer infinitely retry a request when the response returns an HTTP status code of 429 (StatusTooManyRequests). To opt in to the old behavior set `autorest.Count429AsRetry` to `false`. + +## New Features + +- Variable `autorest.Max429Delay` can be used to control the maximum delay between retries when a 429 is received with no `Retry-After` header. The default is zero which means there is no cap. + +## v13.4.0 + +## New Features + +- Added field `SendDecorators` to the `Client` type. This can be used to specify a custom chain of SendDecorators per client. +- Added method `Client.Send()` which includes logic for selecting the preferred chain of SendDecorators. + +## v13.3.3 + +### Bug Fixes + +- Fixed connection leak when retrying requests. +- Enabled exponential back-off with a 2-minute cap when retrying on 429. +- Fixed some cases where errors were inadvertently dropped. + +## v13.3.2 + +### Bug Fixes + +- Updated `autorest.AsStringSlice()` to convert slice elements to their string representation. + +## v13.3.1 + +- Updated external dependencies. + +### Bug Fixes + +## v13.3.0 + +### New Features + +- Added support for shared key and shared access signature token authorization. + - `autorest.NewSharedKeyAuthorizer()` and dependent types. + - `autorest.NewSASTokenAuthorizer()` and dependent types. +- Added `ServicePrincipalToken.SetCustomRefresh()` so a custom refresh function can be invoked when a token has expired. 
+ +### Bug Fixes + +- Fixed `cli.AccessTokensPath()` to respect `AZURE_CONFIG_DIR` when set. +- Support parsing error messages in XML responses. + +## v13.2.0 + +### New Features + +- Added the following functions to replace their versions that don't take a context. + - `adal.InitiateDeviceAuthWithContext()` + - `adal.CheckForUserCompletionWithContext()` + - `adal.WaitForUserCompletionWithContext()` + +## v13.1.0 + +### New Features + +- Added support for MSI authentication on Azure App Service and Azure Functions. + +## v13.0.2 + +### Bug Fixes + +- Always retry a request even if the sender returns a non-nil error. + +## v13.0.1 + +## Bug Fixes + +- Fixed `autorest.WithQueryParameters()` so that it properly encodes multi-value query parameters. + +## v13.0.0 + +## Breaking Changes + +The `tracing` package has been rewritten to provide a common interface for consumers to wire in the tracing package of their choice. +What this means is that by default no tracing provider will be compiled into your program and setting the `AZURE_SDK_TRACING_ENABLED` +environment variable will have no effect. To enable this previous behavior you must now add the following import to your source file. +```go + import _ "github.com/Azure/go-autorest/tracing/opencensus" +``` +The APIs required by autorest-generated code have remained but some APIs have been removed and new ones added. +The following APIs and variables have been removed (the majority of them were moved to the `opencensus` package). +- tracing.Transport +- tracing.Enable() +- tracing.EnableWithAIForwarding() +- tracing.Disable() + +The following APIs and types have been added +- tracing.Tracer +- tracing.Register() + +To hook up a tracer simply call `tracing.Register()` passing in a type that satisfies the `tracing.Tracer` interface. + +## v12.4.3 + +### Bug Fixes + +- `autorest.MultiTenantServicePrincipalTokenAuthorizer` will now properly add its auxiliary bearer tokens. + +## v12.4.2 + +### Bug Fixes + +- Improvements to the fixes made in v12.4.1. + - Remove `override` stanza from Gopkg.toml and `replace` directive from go.mod as they don't apply when being consumed as a dependency. + - Switched to latest version of `ocagent` that still depends on protobuf v1.2. + - Add indirect dependencies to the `required` clause with matching `constraint` stanzas so that `dep` dependencies match go.sum. + +## v12.4.1 + +### Bug Fixes + +- Updated OpenCensus and OCAgent versions to versions that don't depend on v1.3+ of protobuf as it was breaking kubernetes. +- Pinned opencensus-proto to a version that's compatible with our versions of OpenCensus and OCAgent. + +## v12.4.0 + +### New Features + +- Added `autorest.WithPrepareDecorators` and `autorest.GetPrepareDecorators` for adding and retrieving a custom chain of PrepareDecorators to the provided context. + +## v12.3.0 + +### New Features + +- Support for multi-tenant via x-ms-authorization-auxiliary header has been added for client credentials with + secret scenario; this basically bundles multiple OAuthConfig and ServicePrincipalToken types into corresponding + MultiTenant* types along with a new authorizer that adds the primary and auxiliary token headers to the reqest. + The authenticaion helpers have been updated to support this scenario; if environment var AZURE_AUXILIARY_TENANT_IDS + is set with a semicolon delimited list of tenants the multi-tenant codepath will kick in to create the appropriate authorizer. 
+ See `adal.NewMultiTenantOAuthConfig`, `adal.NewMultiTenantServicePrincipalToken` and `autorest.NewMultiTenantServicePrincipalTokenAuthorizer` + along with their supporting types and methods. +- Added `autorest.WithSendDecorators` and `autorest.GetSendDecorators` for adding and retrieving a custom chain of SendDecorators to the provided context. +- Added `autorest.DoRetryForStatusCodesWithCap` and `autorest.DelayForBackoffWithCap` to enforce an upper bound on the duration between retries. + +## v12.2.0 + +### New Features + +- Added `autorest.WithXML`, `autorest.AsMerge`, `autorest.WithBytes` preparer decorators. +- Added `autorest.ByUnmarshallingBytes` response decorator. +- Added `Response.IsHTTPStatus` and `Response.HasHTTPStatus` helper methods for inspecting HTTP status code in `autorest.Response` types. + +### Bug Fixes + +- `autorest.DelayWithRetryAfter` now supports HTTP-Dates in the `Retry-After` header and is not limited to just 429 status codes. + +## v12.1.0 + +### New Features + +- Added `to.ByteSlicePtr()`. +- Added blob/queue storage resource ID to `azure.ResourceIdentifier`. + +## v12.0.0 + +### Breaking Changes + +In preparation for modules the following deprecated content has been removed. + + - async.NewFuture() + - async.Future.Done() + - async.Future.WaitForCompletion() + - async.DoPollForAsynchronous() + - The `utils` package + - validation.NewErrorWithValidationError() + - The `version` package + +## v11.9.0 + +### New Features + +- Add `ResourceIdentifiers` field to `azure.Environment` containing resource IDs for public and sovereign clouds. + +## v11.8.0 + +### New Features + +- Added `autorest.NewClientWithOptions()` to support endpoints that require free renegotiation. + +## v11.7.1 + +### Bug Fixes + +- Fix missing support for http(s) proxy when using the default sender. + +## v11.7.0 + +### New Features + +- Added methods to obtain a ServicePrincipalToken on the various credential configuration types in the `auth` package. + +## v11.6.1 + +### Bug Fixes + +- Fix ACR DNS endpoint for government clouds. +- Add Cosmos DB DNS endpoints. +- Update dependencies to resolve build breaks in OpenCensus. + +## v11.6.0 + +### New Features + +- Added type `autorest.BasicAuthorizer` to support Basic authentication. + +## v11.5.2 + +### Bug Fixes + +- Fixed `GetTokenFromCLI` did not work with zsh. + +## v11.5.1 + +### Bug Fixes + +- In `Client.sender()` set the minimum TLS version on HTTP clients to 1.2. + +## v11.5.0 + +### New Features + +- The `auth` package has been refactored so that the environment and file settings are now available. +- The methods used in `auth.NewAuthorizerFromEnvironment()` are now exported so that custom authorization chains can be created. +- Added support for certificate authorization for file-based config. + +## v11.4.0 + +### New Features + +- Added `adal.AddToUserAgent()` so callers can append custom data to the user-agent header used for ADAL requests. +- Exported `adal.UserAgent()` for parity with `autorest.Client`. + +## v11.3.2 + +### Bug Fixes + +- In `Future.WaitForCompletionRef()` if the provided context has a deadline don't add the default deadline. + +## v11.3.1 + +### Bug Fixes + +- For an LRO PUT operation the final GET URL was incorrectly set to the Location polling header in some cases. + +## v11.3.0 + +### New Features + +- Added method `ServicePrincipalToken()` to `DeviceFlowConfig` type. + +## v11.2.8 + +### Bug Fixes + +- Deprecate content in the `version` package. 
The functionality has been superseded by content in the `autorest` package. + +## v11.2.7 + +### Bug Fixes + +- Fix environment variable name for enabling tracing from `AZURE_SDK_TRACING_ENABELD` to `AZURE_SDK_TRACING_ENABLED`. + Note that for backward compatibility reasons, both will work until the next major version release of the package. + +## v11.2.6 + +### Bug Fixes + +- If zero bytes are read from a polling response body don't attempt to unmarshal them. + +## v11.2.5 + +### Bug Fixes + +- Removed race condition in `autorest.DoRetryForStatusCodes`. + +## v11.2.4 + +### Bug Fixes + +- Function `cli.ProfilePath` now respects environment `AZURE_CONFIG_DIR` if available. + +## v11.2.1 + +NOTE: Versions of Go prior to 1.10 have been removed from CI as they no +longer work with golint. + +### Bug Fixes + +- Method `MSIConfig.Authorizer` now supports user-assigned identities. +- The adal package now reports its own user-agent string. + +## v11.2.0 + +### New Features + +- Added `tracing` package that enables instrumentation of HTTP and API calls. + Setting the env variable `AZURE_SDK_TRACING_ENABLED` or calling `tracing.Enable` + will start instrumenting the code for metrics and traces. + Additionally, setting the env variable `OCAGENT_TRACE_EXPORTER_ENDPOINT` or + calling `tracing.EnableWithAIForwarding` will start the instrumentation and connect to an + App Insights Local Forwarder that is needs to be running. Note that if the + AI Local Forwarder is not running tracking will still be enabled. + By default, instrumentation is disabled. Once enabled, instrumentation can also + be programatically disabled by calling `Disable`. +- Added `DoneWithContext` call for checking LRO status. `Done` has been deprecated. + +### Bug Fixes + +- Don't use the initial request's context for LRO polling. +- Don't override the `refreshLock` and the `http.Client` when unmarshalling `ServicePrincipalToken` if + it is already set. + +## v11.1.1 + +### Bug Fixes + +- When creating a future always include the polling tracker even if there's a failure; this allows the underlying response to be obtained by the caller. + +## v11.1.0 + +### New Features + +- Added `auth.NewAuthorizerFromCLI` to create an authorizer configured from the Azure 2.0 CLI. +- Added `adal.NewOAuthConfigWithAPIVersion` to create an OAuthConfig with the specified API version. + +## v11.0.1 + +### New Features + +- Added `x5c` header to client assertion for certificate Issuer+Subject Name authentication. + +## v11.0.0 + +### Breaking Changes + +- To handle differences between ADFS and AAD the following fields have had their types changed from `string` to `json.Number` + - ExpiresIn + - ExpiresOn + - NotBefore + +### New Features + +- Added `auth.NewAuthorizerFromFileWithResource` to create an authorizer from the config file with the specified resource. +- Setting a client's `PollingDuration` to zero will use the provided context to control a LRO's polling duration. + +## v10.15.5 + +### Bug Fixes + +- In `DoRetryForStatusCodes`, if a request's context is cancelled return the last response. + +## v10.15.4 + +### Bug Fixes + +- If a polling operation returns a failure status code return the associated error. + +## v10.15.3 + +### Bug Fixes + +- Initialize the polling URL and method for an LRO tracker on each iteration, favoring the Azure-AsyncOperation header. + +## v10.15.2 + +### Bug Fixes + +- Use fmt.Fprint when printing request/response so that any escape sequences aren't treated as format specifiers. 
+ +## v10.15.1 + +### Bug Fixes + +- If an LRO API returns a `Failed` provisioning state in the initial response return an error at that point so the caller doesn't have to poll. +- For failed LROs without an OData v4 error include the response body in the error's `AdditionalInfo` field to aid in diagnosing the failure. + +## v10.15.0 + +### New Features + +- Add initial support for request/response logging via setting environment variables. + Setting `AZURE_GO_SDK_LOG_LEVEL` to `LogInfo` will log request/response + without their bodies. To include the bodies set the log level to `LogDebug`. + By default the logger writes to strerr, however it can also write to stdout or a file + if specified in `AZURE_GO_SDK_LOG_FILE`. Note that if the specified file + already exists it will be truncated. + IMPORTANT: by default the logger will redact the Authorization and Ocp-Apim-Subscription-Key + headers. Any other secrets will _not_ be redacted. + +## v10.14.0 + +### New Features + +- Added package version that contains version constants and user-agent data. + +### Bug Fixes + +- Add the user-agent to token requests. + +## v10.13.0 + +- Added support for additionalInfo in ServiceError type. + +## v10.12.0 + +### New Features + +- Added field ServicePrincipalToken.MaxMSIRefreshAttempts to configure the maximun number of attempts to refresh an MSI token. + +## v10.11.4 + +### Bug Fixes + +- If an LRO returns http.StatusOK on the initial response with no async headers return the response body from Future.GetResult(). +- If there is no "final GET URL" return an error from Future.GetResult(). + +## v10.11.3 + +### Bug Fixes + +- In IMDS retry logic, if we don't receive a response don't retry. + - Renamed the retry function so it's clear it's meant for IMDS only. +- For error response bodies that aren't OData-v4 compliant stick the raw JSON in the ServiceError.Details field so the information isn't lost. + - Also add the raw HTTP response to the DetailedResponse. +- Removed superfluous wrapping of response error in azure.DoRetryWithRegistration(). + +## v10.11.2 + +### Bug Fixes + +- Validation for integers handles int and int64 types. + +## v10.11.1 + +### Bug Fixes + +- Adding User information to authorization config as parsed from CLI cache. + +## v10.11.0 + +### New Features + +- Added NewServicePrincipalTokenFromManualTokenSecret for creating a new SPT using a manual token and secret +- Added method ServicePrincipalToken.MarshalTokenJSON() to marshall the inner Token + +## v10.10.0 + +### New Features + +- Most ServicePrincipalTokens can now be marshalled/unmarshall to/from JSON (ServicePrincipalCertificateSecret and ServicePrincipalMSISecret are not supported). +- Added method ServicePrincipalToken.SetRefreshCallbacks(). + +## v10.9.2 + +### Bug Fixes + +- Refreshing a refresh token obtained from a web app authorization code now works. + +## v10.9.1 + +### Bug Fixes + +- The retry logic for MSI token requests now uses exponential backoff per the guidelines. +- IsTemporaryNetworkError() will return true for errors that don't implement the net.Error interface. + +## v10.9.0 + +### Deprecated Methods + +| Old Method | New Method | +| -------------------------: | :---------------------------: | +| azure.NewFuture() | azure.NewFutureFromResponse() | +| Future.WaitForCompletion() | Future.WaitForCompletionRef() | + +### New Features + +- Added azure.NewFutureFromResponse() for creating a Future from the initial response from an async operation. 
+- Added Future.GetResult() for making the final GET call to retrieve the result from an async operation. + +### Bug Fixes + +- Some futures failed to return their results, this should now be fixed. + +## v10.8.2 + +### Bug Fixes + +- Add nil-gaurd to token retry logic. + +## v10.8.1 + +### Bug Fixes + +- Return a TokenRefreshError if the sender fails on the initial request. +- Don't retry on non-temporary network errors. + +## v10.8.0 + +- Added NewAuthorizerFromEnvironmentWithResource() helper function. + +## v10.7.0 + +### New Features + +- Added \*WithContext() methods to ADAL token refresh operations. + +## v10.6.2 + +- Fixed a bug on device authentication. + +## v10.6.1 + +- Added retries to MSI token get request. + +## v10.6.0 + +- Changed MSI token implementation. Now, the token endpoint is the IMDS endpoint. + +## v10.5.1 + +### Bug Fixes + +- `DeviceFlowConfig.Authorizer()` now prints the device code message when running `go test`. `-v` flag is required. + +## v10.5.0 + +### New Features + +- Added NewPollingRequestWithContext() for use with polling asynchronous operations. + +### Bug Fixes + +- Make retry logic use the request's context instead of the deprecated Cancel object. + +## v10.4.0 + +### New Features + +- Added helper for parsing Azure Resource ID's. +- Added deprecation message to utils.GetEnvVarOrExit() + +## v10.3.0 + +### New Features + +- Added EnvironmentFromURL method to load an Environment from a given URL. This function is particularly useful in the private and hybrid Cloud model, where one may define their own endpoints +- Added TokenAudience endpoint to Environment structure. This is useful in private and hybrid cloud models where TokenAudience endpoint can be different from ResourceManagerEndpoint + +## v10.2.0 + +### New Features + +- Added endpoints for batch management. + +## v10.1.3 + +### Bug Fixes + +- In Client.Do() invoke WithInspection() last so that it will inspect WithAuthorization(). +- Fixed authorization methods to invoke p.Prepare() first, aligning them with the other preparers. + +## v10.1.2 + +- Corrected comment for auth.NewAuthorizerFromFile() function. + +## v10.1.1 + +- Updated version number to match current release. + +## v10.1.0 + +### New Features + +- Expose the polling URL for futures. + +### Bug Fixes + +- Add validation.NewErrorWithValidationError back to prevent breaking changes (it is deprecated). + +## v10.0.0 + +### New Features + +- Added target and innererror fields to ServiceError to comply with OData v4 spec. +- The Done() method on futures will now return a ServiceError object when available (it used to return a partial value of such errors). +- Added helper methods for obtaining authorizers. +- Expose the polling URL for futures. + +### Bug Fixes + +- Switched from glide to dep for dependency management. +- Fixed unmarshaling of ServiceError for JSON bodies that don't conform to the OData spec. +- Fixed a race condition in token refresh. + +### Breaking Changes + +- The ServiceError.Details field type has been changed to match the OData v4 spec. +- Go v1.7 has been dropped from CI. +- API parameter validation failures will now return a unique error type validation.Error. +- The adal.Token type has been decomposed from adal.ServicePrincipalToken (this was necessary in order to fix the token refresh race). 
+ +## v9.10.0 + +- Fix the Service Bus suffix in Azure public env +- Add Service Bus Endpoint (AAD ResourceURI) for use in [Azure Service Bus RBAC Preview](https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-role-based-access-control) + +## v9.9.0 + +### New Features + +- Added EventGridKeyAuthorizer for key authorization with event grid topics. + +### Bug Fixes + +- Fixed race condition when auto-refreshing service principal tokens. + +## v9.8.1 + +### Bug Fixes + +- Added http.StatusNoContent (204) to the list of expected status codes for long-running operations. +- Updated runtime version info so it's current. + +## v9.8.0 + +### New Features + +- Added type azure.AsyncOpIncompleteError to be returned from a future's Result() method when the operation has not completed. + +## v9.7.1 + +### Bug Fixes + +- Use correct AAD and Graph endpoints for US Gov environment. + +## v9.7.0 + +### New Features + +- Added support for application/octet-stream MIME types. + +## v9.6.1 + +### Bug Fixes + +- Ensure Authorization header is added to request when polling for registration status. + +## v9.6.0 + +### New Features + +- Added support for acquiring tokens via MSI with a user assigned identity. + +## v9.5.3 + +### Bug Fixes + +- Don't remove encoding of existing URL Query parameters when calling autorest.WithQueryParameters. +- Set correct Content Type when using autorest.WithFormData. + +## v9.5.2 + +### Bug Fixes + +- Check for nil \*http.Response before dereferencing it. + +## v9.5.1 + +### Bug Fixes + +- Don't count http.StatusTooManyRequests (429) against the retry cap. +- Use retry logic when SkipResourceProviderRegistration is set to true. + +## v9.5.0 + +### New Features + +- Added support for username + password, API key, authoriazation code and cognitive services authentication. +- Added field SkipResourceProviderRegistration to clients to provide a way to skip auto-registration of RPs. +- Added utility function AsStringSlice() to convert its parameters to a string slice. + +### Bug Fixes + +- When checking for authentication failures look at the error type not the status code as it could vary. + +## v9.4.2 + +### Bug Fixes + +- Validate parameters when creating credentials. +- Don't retry requests if the returned status is a 401 (http.StatusUnauthorized) as it will never succeed. + +## v9.4.1 + +### Bug Fixes + +- Update the AccessTokensPath() to read access tokens path through AZURE_ACCESS_TOKEN_FILE. If this + environment variable is not set, it will fall back to use default path set by Azure CLI. +- Use case-insensitive string comparison for polling states. + +## v9.4.0 + +### New Features + +- Added WaitForCompletion() to Future as a default polling implementation. + +### Bug Fixes + +- Method Future.Done() shouldn't update polling status for unexpected HTTP status codes. + +## v9.3.1 + +### Bug Fixes + +- DoRetryForStatusCodes will retry if sender.Do returns a non-nil error. + +## v9.3.0 + +### New Features + +- Added PollingMethod() to Future so callers know what kind of polling mechanism is used. +- Added azure.ChangeToGet() which transforms an http.Request into a GET (to be used with LROs). + +## v9.2.0 + +### New Features + +- Added support for custom Azure Stack endpoints. +- Added type azure.Future used to track the status of long-running operations. + +### Bug Fixes + +- Preserve the original error in DoRetryWithRegistration when registration fails. + +## v9.1.1 + +- Fixes a bug regarding the cookie jar on `autorest.Client.Sender`. 
+ +## v9.1.0 + +### New Features + +- In cases where there is a non-empty error from the service, attempt to unmarshal it instead of uniformly calling it an "Unknown" error. +- Support for loading Azure CLI Authentication files. +- Automatically register your subscription with the Azure Resource Provider if it hadn't been previously. + +### Bug Fixes + +- RetriableRequest can now tolerate a ReadSeekable body being read but not reset. +- Adding missing Apache Headers + +## v9.0.0 + +> **IMPORTANT:** This release was intially labeled incorrectly as `v8.4.0`. From the time it was released, it should have been marked `v9.0.0` because it contains breaking changes to the MSI packages. We appologize for any inconvenience this causes. + +Adding MSI Endpoint Support and CLI token rehydration. + +## v8.3.1 + +Pick up bug fix in adal for MSI support. + +## v8.3.0 + +Updates to Error string formats for clarity. Also, adding a copy of the http.Response to errors for an improved debugging experience. + +## v8.2.0 + +### New Features + +- Add support for bearer authentication callbacks +- Support 429 response codes that include "Retry-After" header +- Support validation constraint "Pattern" for map keys + +### Bug Fixes + +- Make RetriableRequest work with multiple versions of Go + +## v8.1.1 + +Updates the RetriableRequest to take advantage of GetBody() added in Go 1.8. + +## v8.1.0 + +Adds RetriableRequest type for more efficient handling of retrying HTTP requests. + +## v8.0.0 + +ADAL refactored into its own package. +Support for UNIX time. + +## v7.3.1 + +- Version Testing now removed from production bits that are shipped with the library. + +## v7.3.0 + +- Exposing new `RespondDecorator`, `ByDiscardingBody`. This allows operations + to acknowledge that they do not need either the entire or a trailing portion + of accepts response body. In doing so, Go's http library can reuse HTTP + connections more readily. +- Adding `PrepareDecorator` to target custom BaseURLs. +- Adding ACR suffix to public cloud environment. +- Updating Glide dependencies. + +## v7.2.5 + +- Fixed the Active Directory endpoint for the China cloud. +- Removes UTF-8 BOM if present in response payload. +- Added telemetry. + +## v7.2.3 + +- Fixing bug in calls to `DelayForBackoff` that caused doubling of delay + duration. + +## v7.2.2 + +- autorest/azure: added ASM and ARM VM DNS suffixes. + +## v7.2.1 + +- fixed parsing of UTC times that are not RFC3339 conformant. + +## v7.2.0 + +- autorest/validation: Reformat validation error for better error message. + +## v7.1.0 + +- preparer: Added support for multipart formdata - WithMultiPartFormdata() +- preparer: Added support for sending file in request body - WithFile +- client: Added RetryDuration parameter. +- autorest/validation: new package for validation code for Azure Go SDK. + +## v7.0.7 + +- Add trailing / to endpoint +- azure: add EnvironmentFromName + +## v7.0.6 + +- Add retry logic for 408, 500, 502, 503 and 504 status codes. +- Change url path and query encoding logic. +- Fix DelayForBackoff for proper exponential delay. +- Add CookieJar in Client. + +## v7.0.5 + +- Add check to start polling only when status is in [200,201,202]. +- Refactoring for unchecked errors. +- azure/persist changes. +- Fix 'file in use' issue in renewing token in deviceflow. +- Store header RetryAfter for subsequent requests in polling. +- Add attribute details in service error. 
+ +## v7.0.4 + +- Better error messages for long running operation failures + +## v7.0.3 + +- Corrected DoPollForAsynchronous to properly handle the initial response + +## v7.0.2 + +- Corrected DoPollForAsynchronous to continue using the polling method first discovered + +## v7.0.1 + +- Fixed empty JSON input error in ByUnmarshallingJSON +- Fixed polling support for GET calls +- Changed format name from TimeRfc1123 to TimeRFC1123 + +## v7.0.0 + +- Added ByCopying responder with supporting TeeReadCloser +- Rewrote Azure asynchronous handling +- Reverted to only unmarshalling JSON +- Corrected handling of RFC3339 time strings and added support for Rfc1123 time format + +The `json.Decoder` does not catch bad data as thoroughly as `json.Unmarshal`. Since +`encoding/json` successfully deserializes all core types, and extended types normally provide +their custom JSON serialization handlers, the code has been reverted back to using +`json.Unmarshal`. The original change to use `json.Decode` was made to reduce duplicate +code; there is no loss of function, and there is a gain in accuracy, by reverting. + +Additionally, Azure services indicate requests to be polled by multiple means. The existing code +only checked for one of those (that is, the presence of the `Azure-AsyncOperation` header). +The new code correctly covers all cases and aligns with the other Azure SDKs. + +## v6.1.0 + +- Introduced `date.ByUnmarshallingJSONDate` and `date.ByUnmarshallingJSONTime` to enable JSON encoded values. + +## v6.0.0 + +- Completely reworked the handling of polled and asynchronous requests +- Removed unnecessary routines +- Reworked `mocks.Sender` to replay a series of `http.Response` objects +- Added `PrepareDecorators` for primitive types (e.g., bool, int32) + +Handling polled and asynchronous requests is no longer part of `Client#Send`. Instead new +`SendDecorators` implement different styles of polled behavior. See`autorest.DoPollForStatusCodes` +and `azure.DoPollForAsynchronous` for examples. + +## v5.0.0 + +- Added new RespondDecorators unmarshalling primitive types +- Corrected application of inspection and authorization PrependDecorators + +## v4.0.0 + +- Added support for Azure long-running operations. +- Added cancelation support to all decorators and functions that may delay. +- Breaking: `DelayForBackoff` now accepts a channel, which may be nil. + +## v3.1.0 + +- Add support for OAuth Device Flow authorization. +- Add support for ServicePrincipalTokens that are backed by an existing token, rather than other secret material. +- Add helpers for persisting and restoring Tokens. +- Increased code coverage in the github.com/Azure/autorest/azure package + +## v3.0.0 + +- Breaking: `NewErrorWithError` no longer takes `statusCode int`. +- Breaking: `NewErrorWithStatusCode` is replaced with `NewErrorWithResponse`. +- Breaking: `Client#Send()` no longer takes `codes ...int` argument. +- Add: XML unmarshaling support with `ByUnmarshallingXML()` +- Stopped vending dependencies locally and switched to [Glide](https://github.com/Masterminds/glide). + Applications using this library should either use Glide or vendor dependencies locally some other way. +- Add: `azure.WithErrorUnlessStatusCode()` decorator to handle Azure errors. +- Fix: use `net/http.DefaultClient` as base client. +- Fix: Missing inspection for polling responses added. +- Add: CopyAndDecode helpers. +- Improved `./autorest/to` with `[]string` helpers. +- Removed golint suppressions in .travis.yml. 
+ +## v2.1.0 + +- Added `StatusCode` to `Error` for more easily obtaining the HTTP Reponse StatusCode (if any) + +## v2.0.0 + +- Changed `to.StringMapPtr` method signature to return a pointer +- Changed `ServicePrincipalCertificateSecret` and `NewServicePrincipalTokenFromCertificate` to support generic certificate and private keys + +## v1.0.0 + +- Added Logging inspectors to trace http.Request / Response +- Added support for User-Agent header +- Changed WithHeader PrepareDecorator to use set vs. add +- Added JSON to error when unmarshalling fails +- Added Client#Send method +- Corrected case of "Azure" in package paths +- Added "to" helpers, Azure helpers, and improved ease-of-use +- Corrected golint issues + +## v1.0.1 + +- Added CHANGELOG.md + +## v1.1.0 + +- Added mechanism to retrieve a ServicePrincipalToken using a certificate-signed JWT +- Added an example of creating a certificate-based ServicePrincipal and retrieving an OAuth token using the certificate + +## v1.1.1 + +- Introduce godeps and vendor dependencies introduced in v1.1.1 diff --git a/vendor/github.com/Azure/go-autorest/GNUmakefile b/vendor/github.com/Azure/go-autorest/GNUmakefile new file mode 100644 index 0000000000..a434e73ac4 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/GNUmakefile @@ -0,0 +1,23 @@ +DIR?=./autorest/ + +default: build + +build: fmt + go install $(DIR) + +test: + go test $(DIR) || exit 1 + +vet: + @echo "go vet ." + @go vet $(DIR)... ; if [ $$? -eq 1 ]; then \ + echo ""; \ + echo "Vet found suspicious constructs. Please check the reported constructs"; \ + echo "and fix them if necessary before submitting the code for review."; \ + exit 1; \ + fi + +fmt: + gofmt -w $(DIR) + +.PHONY: build test vet fmt diff --git a/vendor/github.com/Azure/go-autorest/Gopkg.lock b/vendor/github.com/Azure/go-autorest/Gopkg.lock new file mode 100644 index 0000000000..dc6e3e633e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/Gopkg.lock @@ -0,0 +1,324 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + digest = "1:892e39e5c083d0943f1e80ab8351690f183c6a5ab24e1d280adcad424c26255e" + name = "contrib.go.opencensus.io/exporter/ocagent" + packages = ["."] + pruneopts = "UT" + revision = "a8a6f458bbc1d5042322ad1f9b65eeb0b69be9ea" + version = "v0.6.0" + +[[projects]] + digest = "1:8f5acd4d4462b5136af644d25101f0968a7a94ee90fcb2059cec5b7cc42e0b20" + name = "github.com/census-instrumentation/opencensus-proto" + packages = [ + "gen-go/agent/common/v1", + "gen-go/agent/metrics/v1", + "gen-go/agent/trace/v1", + "gen-go/metrics/v1", + "gen-go/resource/v1", + "gen-go/trace/v1", + ] + pruneopts = "UT" + revision = "d89fa54de508111353cb0b06403c00569be780d8" + version = "v0.2.1" + +[[projects]] + digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "UT" + revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" + +[[projects]] + digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55" + name = "github.com/dgrijalva/jwt-go" + packages = ["."] + pruneopts = "UT" + revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" + version = "v3.2.0" + +[[projects]] + digest = "1:cf0d2e435fd4ce45b789e93ef24b5f08e86be0e9807a16beb3694e2d8c9af965" + name = "github.com/dimchansky/utfbom" + packages = ["."] + pruneopts = "UT" + revision = "d2133a1ce379ef6fa992b0514a77146c60db9d1c" + version = "v1.1.0" + +[[projects]] + branch = "master" + digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8" + name = "github.com/golang/groupcache" + packages = ["lru"] + pruneopts = "UT" + revision = "611e8accdfc92c4187d399e95ce826046d4c8d73" + +[[projects]] + digest = "1:e3839df32927e8d3403cd5aa7253d966e8ff80fc8f10e2e35d146461cd83fcfa" + name = "github.com/golang/protobuf" + packages = [ + "descriptor", + "jsonpb", + "proto", + "protoc-gen-go/descriptor", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/struct", + "ptypes/timestamp", + "ptypes/wrappers", + ] + pruneopts = "UT" + revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7" + version = "v1.3.2" + +[[projects]] + digest = "1:c560cd79300fac84f124b96225181a637a70b60155919a3c36db50b7cca6b806" + name = "github.com/grpc-ecosystem/grpc-gateway" + packages = [ + "internal", + "runtime", + "utilities", + ] + pruneopts = "UT" + revision = "f7120437bb4f6c71f7f5076ad65a45310de2c009" + version = "v1.12.1" + +[[projects]] + digest = "1:5d231480e1c64a726869bc4142d270184c419749d34f167646baa21008eb0a79" + name = "github.com/mitchellh/go-homedir" + packages = ["."] + pruneopts = "UT" + revision = "af06845cf3004701891bf4fdb884bfe4920b3727" + version = "v1.1.0" + +[[projects]] + digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + pruneopts = "UT" + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + digest = "1:99d32780e5238c2621fff621123997c3e3cca96db8be13179013aea77dfab551" + name = "github.com/stretchr/testify" + packages = [ + "assert", + "require", + ] + pruneopts = "UT" + revision = "221dbe5ed46703ee255b1da0dec05086f5035f62" + version = "v1.4.0" + +[[projects]] + digest = "1:7c5e00383399fe13de0b4b65c9fdde16275407ce8ac02d867eafeaa916edcc71" + name = "go.opencensus.io" + packages = [ + ".", + "internal", + "internal/tagencoding", + "metric/metricdata", + "metric/metricproducer", + "plugin/ocgrpc", + "plugin/ochttp", + "plugin/ochttp/propagation/b3", + 
"plugin/ochttp/propagation/tracecontext", + "resource", + "stats", + "stats/internal", + "stats/view", + "tag", + "trace", + "trace/internal", + "trace/propagation", + "trace/tracestate", + ] + pruneopts = "UT" + revision = "aad2c527c5defcf89b5afab7f37274304195a6b2" + version = "v0.22.2" + +[[projects]] + branch = "master" + digest = "1:f604f5e2ee721b6757d962dfe7bab4f28aae50c456e39cfb2f3819762a44a6ae" + name = "golang.org/x/crypto" + packages = [ + "pkcs12", + "pkcs12/internal/rc2", + ] + pruneopts = "UT" + revision = "e9b2fee46413994441b28dfca259d911d963dfed" + +[[projects]] + branch = "master" + digest = "1:334b27eac455cb6567ea28cd424230b07b1a64334a2f861a8075ac26ce10af43" + name = "golang.org/x/lint" + packages = [ + ".", + "golint", + ] + pruneopts = "UT" + revision = "fdd1cda4f05fd1fd86124f0ef9ce31a0b72c8448" + +[[projects]] + branch = "master" + digest = "1:257a75d024975428ab9192bfc334c3490882f8cb21322ea5784ca8eca000a910" + name = "golang.org/x/net" + packages = [ + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "trace", + ] + pruneopts = "UT" + revision = "1ddd1de85cb0337b623b740a609d35817d516a8d" + +[[projects]] + branch = "master" + digest = "1:382bb5a7fb4034db3b6a2d19e5a4a6bcf52f4750530603c01ca18a172fa3089b" + name = "golang.org/x/sync" + packages = ["semaphore"] + pruneopts = "UT" + revision = "cd5d95a43a6e21273425c7ae415d3df9ea832eeb" + +[[projects]] + branch = "master" + digest = "1:4da420ceda5f68e8d748aa2169d0ed44ffadb1bbd6537cf778a49563104189b8" + name = "golang.org/x/sys" + packages = ["unix"] + pruneopts = "UT" + revision = "ce4227a45e2eb77e5c847278dcc6a626742e2945" + +[[projects]] + digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/language", + "internal/language/compact", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + ] + pruneopts = "UT" + revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475" + version = "v0.3.2" + +[[projects]] + branch = "master" + digest = "1:4eb5ea8395fb60212dd58b92c9db80bab59d5e99c7435f9a6a0a528c373b60e7" + name = "golang.org/x/tools" + packages = [ + "go/ast/astutil", + "go/gcexportdata", + "go/internal/gcimporter", + "go/types/typeutil", + ] + pruneopts = "UT" + revision = "259af5ff87bdcd4abf2ecda8edc3f13f04f26a42" + +[[projects]] + digest = "1:964bb30febc27fabfbec4759fa530c6ec35e77a7c85fed90b9317ea39a054877" + name = "google.golang.org/api" + packages = ["support/bundler"] + pruneopts = "UT" + revision = "8a410c21381766a810817fd6200fce8838ecb277" + version = "v0.14.0" + +[[projects]] + branch = "master" + digest = "1:a8d5c2c6e746b3485e36908ab2a9e3d77b86b81f8156d88403c7d2b462431dfd" + name = "google.golang.org/genproto" + packages = [ + "googleapis/api/httpbody", + "googleapis/rpc/status", + "protobuf/field_mask", + ] + pruneopts = "UT" + revision = "51378566eb590fa106d1025ea12835a4416dda84" + +[[projects]] + digest = "1:b59ce3ddb11daeeccccc9cb3183b58ebf8e9a779f1c853308cd91612e817a301" + name = "google.golang.org/grpc" + packages = [ + ".", + "backoff", + "balancer", + "balancer/base", + "balancer/roundrobin", + "binarylog/grpc_binarylog_v1", + "codes", + "connectivity", + "credentials", + "credentials/internal", + "encoding", + "encoding/proto", + "grpclog", + "internal", + "internal/backoff", + "internal/balancerload", 
+ "internal/binarylog", + "internal/buffer", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/grpcsync", + "internal/resolver/dns", + "internal/resolver/passthrough", + "internal/syscall", + "internal/transport", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "serviceconfig", + "stats", + "status", + "tap", + ] + pruneopts = "UT" + revision = "1a3960e4bd028ac0cec0a2afd27d7d8e67c11514" + version = "v1.25.1" + +[[projects]] + digest = "1:b75b3deb2bce8bc079e16bb2aecfe01eb80098f5650f9e93e5643ca8b7b73737" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "UT" + revision = "1f64d6156d11335c3f22d9330b0ad14fc1e789ce" + version = "v2.2.7" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "contrib.go.opencensus.io/exporter/ocagent", + "github.com/dgrijalva/jwt-go", + "github.com/dimchansky/utfbom", + "github.com/mitchellh/go-homedir", + "github.com/stretchr/testify/require", + "go.opencensus.io/plugin/ochttp", + "go.opencensus.io/plugin/ochttp/propagation/tracecontext", + "go.opencensus.io/stats/view", + "go.opencensus.io/trace", + "golang.org/x/crypto/pkcs12", + "golang.org/x/lint/golint", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/Azure/go-autorest/Gopkg.toml b/vendor/github.com/Azure/go-autorest/Gopkg.toml new file mode 100644 index 0000000000..1fc2865969 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/Gopkg.toml @@ -0,0 +1,59 @@ +# Gopkg.toml example +# +# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + +required = ["golang.org/x/lint/golint"] + +[prune] + go-tests = true + unused-packages = true + +[[constraint]] + name = "contrib.go.opencensus.io/exporter/ocagent" + version = "0.6.0" + +[[constraint]] + name = "github.com/dgrijalva/jwt-go" + version = "3.2.0" + +[[constraint]] + name = "github.com/dimchansky/utfbom" + version = "1.1.0" + +[[constraint]] + name = "github.com/mitchellh/go-homedir" + version = "1.1.0" + +[[constraint]] + name = "github.com/stretchr/testify" + version = "1.3.0" + +[[constraint]] + name = "go.opencensus.io" + version = "0.22.0" + +[[constraint]] + branch = "master" + name = "golang.org/x/crypto" diff --git a/vendor/github.com/Azure/go-autorest/LICENSE b/vendor/github.com/Azure/go-autorest/LICENSE new file mode 100644 index 0000000000..b9d6a27ea9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/README.md b/vendor/github.com/Azure/go-autorest/README.md new file mode 100644 index 0000000000..de1e19a44d --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/README.md @@ -0,0 +1,165 @@ +# go-autorest + +[![GoDoc](https://godoc.org/github.com/Azure/go-autorest/autorest?status.png)](https://godoc.org/github.com/Azure/go-autorest/autorest) +[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/Azure.go-autorest?branchName=master)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=625&branchName=master) +[![Go Report Card](https://goreportcard.com/badge/Azure/go-autorest)](https://goreportcard.com/report/Azure/go-autorest) + +Package go-autorest provides an HTTP request client for use with [Autorest](https://github.com/Azure/autorest.go)-generated API client packages. + +An authentication client tested with Azure Active Directory (AAD) is also +provided in this repo in the package +`github.com/Azure/go-autorest/autorest/adal`. Despite its name, this package +is maintained only as part of the Azure Go SDK and is not related to other +"ADAL" libraries in [github.com/AzureAD](https://github.com/AzureAD). + +## Overview + +Package go-autorest implements an HTTP request pipeline suitable for use across +multiple goroutines and provides the shared routines used by packages generated +by [Autorest](https://github.com/Azure/autorest.go). + +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. A typical pattern is: + +```go + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) + + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) + + err = Respond(resp, + ByDiscardingBody(), + ByClosing()) +``` + +Each phase relies on decorators to modify and / or manage processing. Decorators may first modify +and then pass the data along, pass the data first and then modify the result, or wrap themselves +around passing the data (such as a logger might do). Decorators run in the order provided. For +example, the following: + +```go + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) +``` + +will set the URL to: + +``` + https://microsoft.com/a/b/c +``` + +Preparers and Responders may be shared and re-used (assuming the underlying decorators support +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, +all bound together by means of input / output channels. + +Decorators hold their passed state within a closure (such as the path components in the example +above). Be careful to share Preparers and Responders only in a context where such held state +applies. For example, it may not make sense to share a Preparer that applies a query string from a +fixed set of values. Similarly, sharing a Responder that reads the response body into a passed +struct (e.g., `ByUnmarshallingJson`) is likely incorrect. + +Errors raised by autorest objects and methods will conform to the `autorest.Error` interface. + +See the included examples for more detail. For details on the suggested use of this package by +generated clients, see the Client described below. 
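
To make the sharing and error-handling guidance above concrete, here is a minimal, editor-added sketch (not part of the upstream README). It assumes only the `autorest` package itself; the `https://example.invalid/` endpoint and the `widgets` path are illustrative placeholders.

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// A Preparer built once and shared safely across goroutines; its
	// decorators capture the base URL and path in closures.
	preparer := autorest.CreatePreparer(
		autorest.WithBaseURL("https://example.invalid/"), // placeholder endpoint
		autorest.WithPath("widgets"))

	req, err := preparer.Prepare(&http.Request{})
	if err != nil {
		fmt.Println("prepare failed:", err)
		return
	}

	resp, err := autorest.Send(req,
		autorest.DoErrorIfStatusCode(http.StatusInternalServerError),
		autorest.DoCloseIfError(),
		autorest.DoRetryForAttempts(2, time.Second))
	if err != nil {
		// Pipeline errors conform to the autorest.Error interface; a
		// DetailedError carries the failing method and status code.
		if detailed, ok := err.(autorest.DetailedError); ok {
			fmt.Println("request failed:", detailed.Method, detailed.StatusCode)
		} else {
			fmt.Println("request failed:", err)
		}
		return
	}

	err = autorest.Respond(resp,
		autorest.ByDiscardingBody(),
		autorest.ByClosing())
	fmt.Println("responded; err:", err)
}
```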
+ +## Helpers + +### Handling Swagger Dates + +The Swagger specification (https://swagger.io) that drives AutoRest +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure correct +parsing and formatting. + +### Handling Empty Values + +In JSON, missing values have different semantics than empty values. This is especially true for +services using the HTTP PATCH verb. The JSON submitted with a PATCH request generally contains +only those values to modify. Missing values are to be left unchanged. Developers, then, require a +means to both specify an empty value and to leave the value out of the submitted JSON. + +The Go JSON package (`encoding/json`) supports the `omitempty` tag. When specified, it omits +empty values from the rendered JSON. Since Go defines default values for all base types (such as "" +for string and 0 for int) and provides no means to mark a value as actually empty, the JSON package +treats default values as meaning empty, omitting them from the rendered JSON. This means that, using +the Go base types encoded through the default JSON package, it is not possible to create JSON to +clear a value at the server. + +The workaround within the Go community is to use pointers to base types in lieu of base types within +structures that map to JSON. For example, instead of a value of type `string`, the workaround uses +`*string`. While this enables distinguishing empty values from those to be unchanged, creating +pointers to a base type (notably constant, in-line values) requires additional variables. This, for +example, + +```go + s := struct { + S *string + }{ S: &"foo" } +``` +fails, while, this + +```go + v := "foo" + s := struct { + S *string + }{ S: &v } +``` +succeeds. + +To ease using pointers, the subpackage `to` contains helpers that convert to and from pointers for +Go base types which have Swagger analogs. It also provides a helper that converts between +`map[string]string` and `map[string]*string`, enabling the JSON to specify that the value +associated with a key should be cleared. With the helpers, the previous example becomes + +```go + s := struct { + S *string + }{ S: to.StringPtr("foo") } +``` + +## Install + +```bash +go get github.com/Azure/go-autorest/autorest +go get github.com/Azure/go-autorest/autorest/azure +go get github.com/Azure/go-autorest/autorest/date +go get github.com/Azure/go-autorest/autorest/to +``` + +### Using with Go Modules +In [v12.0.1](https://github.com/Azure/go-autorest/pull/386), this repository introduced the following modules. + +- autorest/adal +- autorest/azure/auth +- autorest/azure/cli +- autorest/date +- autorest/mocks +- autorest/to +- autorest/validation +- autorest +- logger +- tracing + +Tagging cumulative SDK releases as a whole (e.g. `v12.3.0`) is still enabled to support consumers of this repo that have not yet migrated to modules. + +## License + +See LICENSE file. + +----- + +This project has adopted the [Microsoft Open Source Code of +Conduct](https://opensource.microsoft.com/codeofconduct/). For more information +see the [Code of Conduct +FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact +[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional +questions or comments. 
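
As an editor's aside tying together the "Handling Empty Values" discussion and the `to` helpers above, the sketch below (not part of the upstream README) marshals a hypothetical `widgetPatch` type; it shows how pointer fields plus `omitempty` distinguish leaving a field unchanged from explicitly clearing it.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/to"
)

// widgetPatch is an illustrative PATCH body; pointer fields let callers
// distinguish "leave unchanged" (nil) from "set or clear" (non-nil).
type widgetPatch struct {
	Name *string            `json:"name,omitempty"`
	Tags map[string]*string `json:"tags,omitempty"`
}

func main() {
	// Rename the widget, clear its "owner" tag, and leave all other fields untouched.
	p := widgetPatch{
		Name: to.StringPtr("renamed"),
		Tags: map[string]*string{"owner": nil},
	}
	b, _ := json.Marshal(p)
	fmt.Println(string(b)) // {"name":"renamed","tags":{"owner":null}}
}
```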
diff --git a/vendor/github.com/Azure/go-autorest/autorest/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/LICENSE new file mode 100644 index 0000000000..b9d6a27ea9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE new file mode 100644 index 0000000000..b9d6a27ea9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. 
Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md new file mode 100644 index 0000000000..fec416a9c4 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md @@ -0,0 +1,292 @@ +# Azure Active Directory authentication for Go + +This is a standalone package for authenticating with Azure Active +Directory from other Go libraries and applications, in particular the [Azure SDK +for Go](https://github.com/Azure/azure-sdk-for-go). + +Note: Despite the package's name it is not related to other "ADAL" libraries +maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues +should be opened in [this repo's](https://github.com/Azure/go-autorest/issues) +or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue +trackers. + +## Install + +```bash +go get -u github.com/Azure/go-autorest/autorest/adal +``` + +## Usage + +An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli). + +### Register an Azure AD Application with secret + + +1. Register a new application with a `secret` credential + + ``` + az ad app create \ + --display-name example-app \ + --homepage https://example-app/home \ + --identifier-uris https://example-app/app \ + --password secret + ``` + +2. Create a service principal using the `Application ID` from previous step + + ``` + az ad sp create --id "Application ID" + ``` + + * Replace `Application ID` with `appId` from step 1. + +### Register an Azure AD Application with certificate + +1. Create a private key + + ``` + openssl genrsa -out "example-app.key" 2048 + ``` + +2. Create the certificate + + ``` + openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr" + openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000 + ``` + +3. Create the PKCS12 version of the certificate containing also the private key + + ``` + openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass: + + ``` + +4. Register a new application with the certificate content form `example-app.crt` + + ``` + certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)" + + az ad app create \ + --display-name example-app \ + --homepage https://example-app/home \ + --identifier-uris https://example-app/app \ + --key-usage Verify --end-date 2018-01-01 \ + --key-value "${certificateContents}" + ``` + +5. Create a service principal using the `Application ID` from previous step + + ``` + az ad sp create --id "APPLICATION_ID" + ``` + + * Replace `APPLICATION_ID` with `appId` from step 4. + + +### Grant the necessary permissions + +Azure relies on a Role-Based Access Control (RBAC) model to manage the access to resources at a fine-grained +level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles) +which can be assigned to a service principal of an Azure AD application depending of your needs. + +``` +az role assignment create --assigner "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME" +``` + +* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from previous step. 
+* Replace the `ROLE_NAME` with a role name of your choice. + +It is also possible to define custom role definitions. + +``` +az role definition create --role-definition role-definition.json +``` + +* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of `role-definition.json` file. + + +### Acquire Access Token + +The common configuration used by all flows: + +```Go +const activeDirectoryEndpoint = "https://login.microsoftonline.com/" +tenantID := "TENANT_ID" +oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID) + +applicationID := "APPLICATION_ID" + +callback := func(token adal.Token) error { + // This is called after the token is acquired +} + +// The resource for which the token is acquired +resource := "https://management.core.windows.net/" +``` + +* Replace the `TENANT_ID` with your tenant ID. +* Replace the `APPLICATION_ID` with the value from previous section. + +#### Client Credentials + +```Go +applicationSecret := "APPLICATION_SECRET" + +spt, err := adal.NewServicePrincipalToken( + *oauthConfig, + appliationID, + applicationSecret, + resource, + callbacks...) +if err != nil { + return nil, err +} + +// Acquire a new access token +err = spt.Refresh() +if (err == nil) { + token := spt.Token +} +``` + +* Replace the `APPLICATION_SECRET` with the `password` value from previous section. + +#### Client Certificate + +```Go +certificatePath := "./example-app.pfx" + +certData, err := ioutil.ReadFile(certificatePath) +if err != nil { + return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err) +} + +// Get the certificate and private key from pfx file +certificate, rsaPrivateKey, err := decodePkcs12(certData, "") +if err != nil { + return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) +} + +spt, err := adal.NewServicePrincipalTokenFromCertificate( + *oauthConfig, + applicationID, + certificate, + rsaPrivateKey, + resource, + callbacks...) + +// Acquire a new access token +err = spt.Refresh() +if (err == nil) { + token := spt.Token +} +``` + +* Update the certificate path to point to the example-app.pfx file which was created in previous section. + + +#### Device Code + +```Go +oauthClient := &http.Client{} + +// Acquire the device code +deviceCode, err := adal.InitiateDeviceAuth( + oauthClient, + *oauthConfig, + applicationID, + resource) +if err != nil { + return nil, fmt.Errorf("Failed to start device auth flow: %s", err) +} + +// Display the authentication message +fmt.Println(*deviceCode.Message) + +// Wait here until the user is authenticated +token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) +if err != nil { + return nil, fmt.Errorf("Failed to finish device auth flow: %s", err) +} + +spt, err := adal.NewServicePrincipalTokenFromManualToken( + *oauthConfig, + applicationID, + resource, + *token, + callbacks...) + +if (err == nil) { + token := spt.Token +} +``` + +#### Username password authenticate + +```Go +spt, err := adal.NewServicePrincipalTokenFromUsernamePassword( + *oauthConfig, + applicationID, + username, + password, + resource, + callbacks...) + +if (err == nil) { + token := spt.Token +} +``` + +#### Authorization code authenticate + +``` Go +spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode( + *oauthConfig, + applicationID, + clientSecret, + authorizationCode, + redirectURI, + resource, + callbacks...) 
+ +err = spt.Refresh() +if (err == nil) { + token := spt.Token +} +``` + +### Command Line Tool + +A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above. + +``` +adal -h + +Usage of ./adal: + -applicationId string + application id + -certificatePath string + path to pk12/PFC application certificate + -mode string + authentication mode (device, secret, cert, refresh) (default "device") + -resource string + resource for which the token is requested + -secret string + application secret + -tenantId string + tenant id + -tokenCachePath string + location of oath token cache (default "/home/cgc/.adal/accessToken.json") +``` + +Example acquire a token for `https://management.core.windows.net/` using device code flow: + +``` +adal -mode device \ + -applicationId "APPLICATION_ID" \ + -tenantId "TENANT_ID" \ + -resource https://management.core.windows.net/ + +``` diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go new file mode 100644 index 0000000000..fa5964742f --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go @@ -0,0 +1,151 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "errors" + "fmt" + "net/url" +) + +const ( + activeDirectoryEndpointTemplate = "%s/oauth2/%s%s" +) + +// OAuthConfig represents the endpoints needed +// in OAuth operations +type OAuthConfig struct { + AuthorityEndpoint url.URL `json:"authorityEndpoint"` + AuthorizeEndpoint url.URL `json:"authorizeEndpoint"` + TokenEndpoint url.URL `json:"tokenEndpoint"` + DeviceCodeEndpoint url.URL `json:"deviceCodeEndpoint"` +} + +// IsZero returns true if the OAuthConfig object is zero-initialized. +func (oac OAuthConfig) IsZero() bool { + return oac == OAuthConfig{} +} + +func validateStringParam(param, name string) error { + if len(param) == 0 { + return fmt.Errorf("parameter '" + name + "' cannot be empty") + } + return nil +} + +// NewOAuthConfig returns an OAuthConfig with tenant specific urls +func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { + apiVer := "1.0" + return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer) +} + +// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant specific urls. +// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value. 
+func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) {
+	if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil {
+		return nil, err
+	}
+	api := ""
+	// it's legal for tenantID to be empty so don't validate it
+	if apiVersion != nil {
+		if err := validateStringParam(*apiVersion, "apiVersion"); err != nil {
+			return nil, err
+		}
+		api = fmt.Sprintf("?api-version=%s", *apiVersion)
+	}
+	u, err := url.Parse(activeDirectoryEndpoint)
+	if err != nil {
+		return nil, err
+	}
+	authorityURL, err := u.Parse(tenantID)
+	if err != nil {
+		return nil, err
+	}
+	authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api))
+	if err != nil {
+		return nil, err
+	}
+	tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api))
+	if err != nil {
+		return nil, err
+	}
+	deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api))
+	if err != nil {
+		return nil, err
+	}
+
+	return &OAuthConfig{
+		AuthorityEndpoint:  *authorityURL,
+		AuthorizeEndpoint:  *authorizeURL,
+		TokenEndpoint:      *tokenURL,
+		DeviceCodeEndpoint: *deviceCodeURL,
+	}, nil
+}
+
+// MultiTenantOAuthConfig provides endpoints for primary and auxiliary tenant IDs.
+type MultiTenantOAuthConfig interface {
+	PrimaryTenant() *OAuthConfig
+	AuxiliaryTenants() []*OAuthConfig
+}
+
+// OAuthOptions contains optional OAuthConfig creation arguments.
+type OAuthOptions struct {
+	APIVersion string
+}
+
+func (c OAuthOptions) apiVersion() string {
+	if c.APIVersion != "" {
+		return fmt.Sprintf("?api-version=%s", c.APIVersion)
+	}
+	return "1.0"
+}
+
+// NewMultiTenantOAuthConfig creates an object that supports multi-tenant OAuth configuration.
+// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information.
+func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) { + if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 { + return nil, errors.New("must specify one to three auxiliary tenants") + } + mtCfg := multiTenantOAuthConfig{ + cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1), + } + apiVer := options.apiVersion() + pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer) + if err != nil { + return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err) + } + mtCfg.cfgs[0] = pri + for i := range auxiliaryTenantIDs { + aux, err := NewOAuthConfig(activeDirectoryEndpoint, auxiliaryTenantIDs[i]) + if err != nil { + return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err) + } + mtCfg.cfgs[i+1] = aux + } + return mtCfg, nil +} + +type multiTenantOAuthConfig struct { + // first config in the slice is the primary tenant + cfgs []*OAuthConfig +} + +func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig { + return m.cfgs[0] +} + +func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig { + return m.cfgs[1:] +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go new file mode 100644 index 0000000000..9daa4b58b8 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go @@ -0,0 +1,273 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/* + This file is largely based on rjw57/oauth2device's code, with the follow differences: + * scope -> resource, and only allow a single one + * receive "Message" in the DeviceCode struct and show it to users as the prompt + * azure-xplat-cli has the following behavior that this emulates: + - does not send client_secret during the token exchange + - sends resource again in the token exchange request +*/ + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" +) + +const ( + logPrefix = "autorest/adal/devicetoken:" +) + +var ( + // ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow + ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix) + + // ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow + ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix) + + // ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow + ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix) + + // ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow + ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix) + + // ErrDeviceSlowDown represents the service telling us we're polling too often during device flow + ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix) + + // ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow + ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix) + + // ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow + ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix) + + errCodeSendingFails = "Error occurred while sending request for Device Authorization Code" + errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint" + errTokenSendingFails = "Error occurred while sending request with device code for a token" + errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)" + errStatusNotOK = "Error HTTP status != 200" +) + +// DeviceCode is the object returned by the device auth endpoint +// It contains information to instruct the user to complete the auth flow +type DeviceCode struct { + DeviceCode *string `json:"device_code,omitempty"` + UserCode *string `json:"user_code,omitempty"` + VerificationURL *string `json:"verification_url,omitempty"` + ExpiresIn *int64 `json:"expires_in,string,omitempty"` + Interval *int64 `json:"interval,string,omitempty"` + + Message *string `json:"message"` // Azure specific + Resource string // store the following, stored when initiating, used when exchanging + OAuthConfig OAuthConfig + ClientID string +} + +// TokenError is the object returned by the token exchange endpoint +// when something is amiss +type TokenError struct { + Error *string `json:"error,omitempty"` + ErrorCodes []int `json:"error_codes,omitempty"` + ErrorDescription *string `json:"error_description,omitempty"` + Timestamp *string `json:"timestamp,omitempty"` + TraceID *string `json:"trace_id,omitempty"` +} + +// DeviceToken is the object return by the token exchange 
endpoint +// It can either look like a Token or an ErrorToken, so put both here +// and check for presence of "Error" to know if we are in error state +type deviceToken struct { + Token + TokenError +} + +// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode +// that can be used with CheckForUserCompletion or WaitForUserCompletion. +// Deprecated: use InitiateDeviceAuthWithContext() instead. +func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { + return InitiateDeviceAuthWithContext(context.Background(), sender, oauthConfig, clientID, resource) +} + +// InitiateDeviceAuthWithContext initiates a device auth flow. It returns a DeviceCode +// that can be used with CheckForUserCompletion or WaitForUserCompletion. +func InitiateDeviceAuthWithContext(ctx context.Context, sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { + v := url.Values{ + "client_id": []string{clientID}, + "resource": []string{resource}, + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req.WithContext(ctx)) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK) + } + + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrDeviceCodeEmpty + } + + var code DeviceCode + err = json.Unmarshal(rb, &code) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) + } + + code.ClientID = clientID + code.Resource = resource + code.OAuthConfig = oauthConfig + + return &code, nil +} + +// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint +// to see if the device flow has: been completed, timed out, or otherwise failed +// Deprecated: use CheckForUserCompletionWithContext() instead. 
+func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + return CheckForUserCompletionWithContext(context.Background(), sender, code) +} + +// CheckForUserCompletionWithContext takes a DeviceCode and checks with the Azure AD OAuth endpoint +// to see if the device flow has: been completed, timed out, or otherwise failed +func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) { + v := url.Values{ + "client_id": []string{code.ClientID}, + "code": []string{*code.DeviceCode}, + "grant_type": []string{OAuthGrantTypeDeviceCode}, + "resource": []string{code.Resource}, + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req.WithContext(ctx)) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrOAuthTokenEmpty + } + + var token deviceToken + err = json.Unmarshal(rb, &token) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if token.Error == nil { + return &token.Token, nil + } + + switch *token.Error { + case "authorization_pending": + return nil, ErrDeviceAuthorizationPending + case "slow_down": + return nil, ErrDeviceSlowDown + case "access_denied": + return nil, ErrDeviceAccessDenied + case "code_expired": + return nil, ErrDeviceCodeExpired + default: + // return a more meaningful error message if available + if token.ErrorDescription != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, *token.Error, *token.ErrorDescription) + } + return nil, ErrDeviceGeneric + } +} + +// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs. +// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. +// Deprecated: use WaitForUserCompletionWithContext() instead. +func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + return WaitForUserCompletionWithContext(context.Background(), sender, code) +} + +// WaitForUserCompletionWithContext calls CheckForUserCompletion repeatedly until a token is granted or an error +// state occurs. This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. 
+func WaitForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) { + intervalDuration := time.Duration(*code.Interval) * time.Second + waitDuration := intervalDuration + + for { + token, err := CheckForUserCompletionWithContext(ctx, sender, code) + + if err == nil { + return token, nil + } + + switch err { + case ErrDeviceSlowDown: + waitDuration += waitDuration + case ErrDeviceAuthorizationPending: + // noop + default: // everything else is "fatal" to us + return nil, err + } + + if waitDuration > (intervalDuration * 3) { + return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix) + } + + select { + case <-time.After(waitDuration): + // noop + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod new file mode 100644 index 0000000000..abcc27d4cc --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod @@ -0,0 +1,12 @@ +module github.com/Azure/go-autorest/autorest/adal + +go 1.12 + +require ( + github.com/Azure/go-autorest v14.2.0+incompatible + github.com/Azure/go-autorest/autorest/date v0.3.0 + github.com/Azure/go-autorest/autorest/mocks v0.4.1 + github.com/Azure/go-autorest/tracing v0.6.0 + github.com/form3tech-oss/jwt-go v3.2.2+incompatible + golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 +) diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum new file mode 100644 index 0000000000..9d55b0f596 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum @@ -0,0 +1,19 @@ +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go new file mode 100644 index 0000000000..7551b79235 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go new file mode 100644 index 0000000000..2a974a39b3 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go @@ -0,0 +1,135 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "golang.org/x/crypto/pkcs12" +) + +var ( + // ErrMissingCertificate is returned when no local certificate is found in the provided PFX data. + ErrMissingCertificate = errors.New("adal: certificate missing") + + // ErrMissingPrivateKey is returned when no private key is found in the provided PFX data. + ErrMissingPrivateKey = errors.New("adal: private key missing") +) + +// LoadToken restores a Token object from a file located at 'path'. +func LoadToken(path string) (*Token, error) { + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) + } + defer file.Close() + + var token Token + + dec := json.NewDecoder(file) + if err = dec.Decode(&token); err != nil { + return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err) + } + return &token, nil +} + +// SaveToken persists an oauth token at the given location on disk. 
+// It moves the new file into place so it can safely be used to replace an existing file +// that maybe accessed by multiple processes. +func SaveToken(path string, mode os.FileMode, token Token) error { + dir := filepath.Dir(path) + err := os.MkdirAll(dir, os.ModePerm) + if err != nil { + return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err) + } + + newFile, err := ioutil.TempFile(dir, "token") + if err != nil { + return fmt.Errorf("failed to create the temp file to write the token: %v", err) + } + tempPath := newFile.Name() + + if err := json.NewEncoder(newFile).Encode(token); err != nil { + return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err) + } + if err := newFile.Close(); err != nil { + return fmt.Errorf("failed to close temp file %s: %v", tempPath, err) + } + + // Atomic replace to avoid multi-writer file corruptions + if err := os.Rename(tempPath, path); err != nil { + return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err) + } + if err := os.Chmod(path, mode); err != nil { + return fmt.Errorf("failed to chmod the token file %s: %v", path, err) + } + return nil +} + +// DecodePfxCertificateData extracts the x509 certificate and RSA private key from the provided PFX data. +// The PFX data must contain a private key along with a certificate whose public key matches that of the +// private key or an error is returned. +// If the private key is not password protected pass the empty string for password. +func DecodePfxCertificateData(pfxData []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) { + blocks, err := pkcs12.ToPEM(pfxData, password) + if err != nil { + return nil, nil, err + } + // first extract the private key + var priv *rsa.PrivateKey + for _, block := range blocks { + if block.Type == "PRIVATE KEY" { + priv, err = x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, nil, err + } + break + } + } + if priv == nil { + return nil, nil, ErrMissingPrivateKey + } + // now find the certificate with the matching public key of our private key + var cert *x509.Certificate + for _, block := range blocks { + if block.Type == "CERTIFICATE" { + pcert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, nil, err + } + certKey, ok := pcert.PublicKey.(*rsa.PublicKey) + if !ok { + // keep looking + continue + } + if priv.E == certKey.E && priv.N.Cmp(certKey.N) == 0 { + // found a match + cert = pcert + break + } + } + } + if cert == nil { + return nil, nil, ErrMissingCertificate + } + return cert, priv, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go new file mode 100644 index 0000000000..d7e4372bbc --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go @@ -0,0 +1,95 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "crypto/tls" + "net/http" + "net/http/cookiejar" + "sync" + + "github.com/Azure/go-autorest/tracing" +) + +const ( + contentType = "Content-Type" + mimeTypeFormPost = "application/x-www-form-urlencoded" +) + +var defaultSender Sender +var defaultSenderInit = &sync.Once{} + +// Sender is the interface that wraps the Do method to send HTTP requests. +// +// The standard http.Client conforms to this interface. +type Sender interface { + Do(*http.Request) (*http.Response, error) +} + +// SenderFunc is a method that implements the Sender interface. +type SenderFunc func(*http.Request) (*http.Response, error) + +// Do implements the Sender interface on SenderFunc. +func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { + return sf(r) +} + +// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the +// http.Request and pass it along or, first, pass the http.Request along then react to the +// http.Response result. +type SendDecorator func(Sender) Sender + +// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. +func CreateSender(decorators ...SendDecorator) Sender { + return DecorateSender(sender(), decorators...) +} + +// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to +// the Sender. Decorators are applied in the order received, but their affect upon the request +// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a +// post-decorator (pass the http.Request along and react to the results in http.Response). +func DecorateSender(s Sender, decorators ...SendDecorator) Sender { + for _, decorate := range decorators { + s = decorate(s) + } + return s +} + +func sender() Sender { + // note that we can't init defaultSender in init() since it will + // execute before calling code has had a chance to enable tracing + defaultSenderInit.Do(func() { + // Use behaviour compatible with DefaultTransport, but require TLS minimum version. + defaultTransport := http.DefaultTransport.(*http.Transport) + transport := &http.Transport{ + Proxy: defaultTransport.Proxy, + DialContext: defaultTransport.DialContext, + MaxIdleConns: defaultTransport.MaxIdleConns, + IdleConnTimeout: defaultTransport.IdleConnTimeout, + TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout, + ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout, + TLSClientConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + }, + } + var roundTripper http.RoundTripper = transport + if tracing.IsEnabled() { + roundTripper = tracing.NewTransport(transport) + } + j, _ := cookiejar.New(nil) + defaultSender = &http.Client{Jar: j, Transport: roundTripper} + }) + return defaultSender +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go new file mode 100644 index 0000000000..addc910999 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go @@ -0,0 +1,1266 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "math" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/Azure/go-autorest/autorest/date" + "github.com/form3tech-oss/jwt-go" +) + +const ( + defaultRefresh = 5 * time.Minute + + // OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow + OAuthGrantTypeDeviceCode = "device_code" + + // OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows + OAuthGrantTypeClientCredentials = "client_credentials" + + // OAuthGrantTypeUserPass is the "grant_type" identifier used in username and password auth flows + OAuthGrantTypeUserPass = "password" + + // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows + OAuthGrantTypeRefreshToken = "refresh_token" + + // OAuthGrantTypeAuthorizationCode is the "grant_type" identifier used in authorization code flows + OAuthGrantTypeAuthorizationCode = "authorization_code" + + // metadataHeader is the header required by MSI extension + metadataHeader = "Metadata" + + // msiEndpoint is the well known endpoint for getting MSI authentications tokens + msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" + + // the API version to use for the MSI endpoint + msiAPIVersion = "2018-02-01" + + // the default number of attempts to refresh an MSI authentication token + defaultMaxMSIRefreshAttempts = 5 + + // asMSIEndpointEnv is the environment variable used to store the endpoint on App Service and Functions + asMSIEndpointEnv = "MSI_ENDPOINT" + + // asMSISecretEnv is the environment variable used to store the request secret on App Service and Functions + asMSISecretEnv = "MSI_SECRET" + + // the API version to use for the App Service MSI endpoint + appServiceAPIVersion = "2017-09-01" + + // secret header used when authenticating against app service MSI endpoint + secretHeader = "Secret" + + // the format for expires_on in UTC with AM/PM + expiresOnDateFormatPM = "1/2/2006 15:04:05 PM +00:00" + + // the format for expires_on in UTC without AM/PM + expiresOnDateFormat = "1/2/2006 15:04:05 +00:00" +) + +// OAuthTokenProvider is an interface which should be implemented by an access token retriever +type OAuthTokenProvider interface { + OAuthToken() string +} + +// MultitenantOAuthTokenProvider provides tokens used for multi-tenant authorization. +type MultitenantOAuthTokenProvider interface { + PrimaryOAuthToken() string + AuxiliaryOAuthTokens() []string +} + +// TokenRefreshError is an interface used by errors returned during token refresh. 
+type TokenRefreshError interface { + error + Response() *http.Response +} + +// Refresher is an interface for token refresh functionality +type Refresher interface { + Refresh() error + RefreshExchange(resource string) error + EnsureFresh() error +} + +// RefresherWithContext is an interface for token refresh functionality +type RefresherWithContext interface { + RefreshWithContext(ctx context.Context) error + RefreshExchangeWithContext(ctx context.Context, resource string) error + EnsureFreshWithContext(ctx context.Context) error +} + +// TokenRefreshCallback is the type representing callbacks that will be called after +// a successful token refresh +type TokenRefreshCallback func(Token) error + +// TokenRefresh is a type representing a custom callback to refresh a token +type TokenRefresh func(ctx context.Context, resource string) (*Token, error) + +// Token encapsulates the access token used to authorize Azure requests. +// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response +type Token struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + + ExpiresIn json.Number `json:"expires_in"` + ExpiresOn json.Number `json:"expires_on"` + NotBefore json.Number `json:"not_before"` + + Resource string `json:"resource"` + Type string `json:"token_type"` +} + +func newToken() Token { + return Token{ + ExpiresIn: "0", + ExpiresOn: "0", + NotBefore: "0", + } +} + +// IsZero returns true if the token object is zero-initialized. +func (t Token) IsZero() bool { + return t == Token{} +} + +// Expires returns the time.Time when the Token expires. +func (t Token) Expires() time.Time { + s, err := t.ExpiresOn.Float64() + if err != nil { + s = -3600 + } + + expiration := date.NewUnixTimeFromSeconds(s) + + return time.Time(expiration).UTC() +} + +// IsExpired returns true if the Token is expired, false otherwise. +func (t Token) IsExpired() bool { + return t.WillExpireIn(0) +} + +// WillExpireIn returns true if the Token will expire after the passed time.Duration interval +// from now, false otherwise. +func (t Token) WillExpireIn(d time.Duration) bool { + return !t.Expires().After(time.Now().Add(d)) +} + +//OAuthToken return the current access token +func (t *Token) OAuthToken() string { + return t.AccessToken +} + +// ServicePrincipalSecret is an interface that allows various secret mechanism to fill the form +// that is submitted when acquiring an oAuth token. +type ServicePrincipalSecret interface { + SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error +} + +// ServicePrincipalNoSecret represents a secret type that contains no secret +// meaning it is not valid for fetching a fresh token. This is used by Manual +type ServicePrincipalNoSecret struct { +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret +// It only returns an error for the ServicePrincipalNoSecret type +func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token") +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (noSecret ServicePrincipalNoSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalNoSecret", + }) +} + +// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization. +type ServicePrincipalTokenSecret struct { + ClientSecret string `json:"value"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using the client_secret. +func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("client_secret", tokenSecret.ClientSecret) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (tokenSecret ServicePrincipalTokenSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Value string `json:"value"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalTokenSecret", + Value: tokenSecret.ClientSecret, + }) +} + +// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. +type ServicePrincipalCertificateSecret struct { + Certificate *x509.Certificate + PrivateKey *rsa.PrivateKey +} + +// SignJwt returns the JWT signed with the certificate's private key. +func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { + hasher := sha1.New() + _, err := hasher.Write(secret.Certificate.Raw) + if err != nil { + return "", err + } + + thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + + // The jti (JWT ID) claim provides a unique identifier for the JWT. + jti := make([]byte, 20) + _, err = rand.Read(jti) + if err != nil { + return "", err + } + + token := jwt.New(jwt.SigningMethodRS256) + token.Header["x5t"] = thumbprint + x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)} + token.Header["x5c"] = x5c + token.Claims = jwt.MapClaims{ + "aud": spt.inner.OauthConfig.TokenEndpoint.String(), + "iss": spt.inner.ClientID, + "sub": spt.inner.ClientID, + "jti": base64.URLEncoding.EncodeToString(jti), + "nbf": time.Now().Unix(), + "exp": time.Now().Add(24 * time.Hour).Unix(), + } + + signedString, err := token.SignedString(secret.PrivateKey) + return signedString, err +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. +func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + jwt, err := secret.SignJwt(spt) + if err != nil { + return err + } + + v.Set("client_assertion", jwt) + v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalCertificateSecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalCertificateSecret is not supported") +} + +// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension. +type ServicePrincipalMSISecret struct { +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. 
+func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (msiSecret ServicePrincipalMSISecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalMSISecret is not supported") +} + +// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth. +type ServicePrincipalUsernamePasswordSecret struct { + Username string `json:"username"` + Password string `json:"password"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("username", secret.Username) + v.Set("password", secret.Password) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalUsernamePasswordSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Username string `json:"username"` + Password string `json:"password"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalUsernamePasswordSecret", + Username: secret.Username, + Password: secret.Password, + }) +} + +// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth. +type ServicePrincipalAuthorizationCodeSecret struct { + ClientSecret string `json:"value"` + AuthorizationCode string `json:"authCode"` + RedirectURI string `json:"redirect"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("code", secret.AuthorizationCode) + v.Set("client_secret", secret.ClientSecret) + v.Set("redirect_uri", secret.RedirectURI) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Value string `json:"value"` + AuthCode string `json:"authCode"` + Redirect string `json:"redirect"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalAuthorizationCodeSecret", + Value: secret.ClientSecret, + AuthCode: secret.AuthorizationCode, + Redirect: secret.RedirectURI, + }) +} + +// ServicePrincipalToken encapsulates a Token created for a Service Principal. +type ServicePrincipalToken struct { + inner servicePrincipalToken + refreshLock *sync.RWMutex + sender Sender + customRefreshFunc TokenRefresh + refreshCallbacks []TokenRefreshCallback + // MaxMSIRefreshAttempts is the maximum number of attempts to refresh an MSI token. + // Settings this to a value less than 1 will use the default value. + MaxMSIRefreshAttempts int +} + +// MarshalTokenJSON returns the marshalled inner token. +func (spt ServicePrincipalToken) MarshalTokenJSON() ([]byte, error) { + return json.Marshal(spt.inner.Token) +} + +// SetRefreshCallbacks replaces any existing refresh callbacks with the specified callbacks. +func (spt *ServicePrincipalToken) SetRefreshCallbacks(callbacks []TokenRefreshCallback) { + spt.refreshCallbacks = callbacks +} + +// SetCustomRefreshFunc sets a custom refresh function used to refresh the token. 
+func (spt *ServicePrincipalToken) SetCustomRefreshFunc(customRefreshFunc TokenRefresh) { + spt.customRefreshFunc = customRefreshFunc +} + +// MarshalJSON implements the json.Marshaler interface. +func (spt ServicePrincipalToken) MarshalJSON() ([]byte, error) { + return json.Marshal(spt.inner) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error { + // need to determine the token type + raw := map[string]interface{}{} + err := json.Unmarshal(data, &raw) + if err != nil { + return err + } + secret := raw["secret"].(map[string]interface{}) + switch secret["type"] { + case "ServicePrincipalNoSecret": + spt.inner.Secret = &ServicePrincipalNoSecret{} + case "ServicePrincipalTokenSecret": + spt.inner.Secret = &ServicePrincipalTokenSecret{} + case "ServicePrincipalCertificateSecret": + return errors.New("unmarshalling ServicePrincipalCertificateSecret is not supported") + case "ServicePrincipalMSISecret": + return errors.New("unmarshalling ServicePrincipalMSISecret is not supported") + case "ServicePrincipalUsernamePasswordSecret": + spt.inner.Secret = &ServicePrincipalUsernamePasswordSecret{} + case "ServicePrincipalAuthorizationCodeSecret": + spt.inner.Secret = &ServicePrincipalAuthorizationCodeSecret{} + default: + return fmt.Errorf("unrecognized token type '%s'", secret["type"]) + } + err = json.Unmarshal(data, &spt.inner) + if err != nil { + return err + } + // Don't override the refreshLock or the sender if those have been already set. + if spt.refreshLock == nil { + spt.refreshLock = &sync.RWMutex{} + } + if spt.sender == nil { + spt.sender = sender() + } + return nil +} + +// internal type used for marshalling/unmarshalling +type servicePrincipalToken struct { + Token Token `json:"token"` + Secret ServicePrincipalSecret `json:"secret"` + OauthConfig OAuthConfig `json:"oauth"` + ClientID string `json:"clientID"` + Resource string `json:"resource"` + AutoRefresh bool `json:"autoRefresh"` + RefreshWithin time.Duration `json:"refreshWithin"` +} + +func validateOAuthConfig(oac OAuthConfig) error { + if oac.IsZero() { + return fmt.Errorf("parameter 'oauthConfig' cannot be zero-initialized") + } + return nil +} + +// NewServicePrincipalTokenWithSecret create a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation. 
+func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(id, "id"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("parameter 'secret' cannot be nil") + } + spt := &ServicePrincipalToken{ + inner: servicePrincipalToken{ + Token: newToken(), + OauthConfig: oauthConfig, + Secret: secret, + ClientID: id, + Resource: resource, + AutoRefresh: true, + RefreshWithin: defaultRefresh, + }, + refreshLock: &sync.RWMutex{}, + sender: sender(), + refreshCallbacks: callbacks, + } + return spt, nil +} + +// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token +func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if token.IsZero() { + return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") + } + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalNoSecret{}, + callbacks...) + if err != nil { + return nil, err + } + + spt.inner.Token = token + + return spt, nil +} + +// NewServicePrincipalTokenFromManualTokenSecret creates a ServicePrincipalToken using the supplied token and secret +func NewServicePrincipalTokenFromManualTokenSecret(oauthConfig OAuthConfig, clientID string, resource string, token Token, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("parameter 'secret' cannot be nil") + } + if token.IsZero() { + return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") + } + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + secret, + callbacks...) + if err != nil { + return nil, err + } + + spt.inner.Token = token + + return spt, nil +} + +// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal +// credentials scoped to the named resource. 
+func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(secret, "secret"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalTokenSecret{ + ClientSecret: secret, + }, + callbacks..., + ) +} + +// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied pkcs12 bytes. +func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if certificate == nil { + return nil, fmt.Errorf("parameter 'certificate' cannot be nil") + } + if privateKey == nil { + return nil, fmt.Errorf("parameter 'privateKey' cannot be nil") + } + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalCertificateSecret{ + PrivateKey: privateKey, + Certificate: certificate, + }, + callbacks..., + ) +} + +// NewServicePrincipalTokenFromUsernamePassword creates a ServicePrincipalToken from the username and password. 
+func NewServicePrincipalTokenFromUsernamePassword(oauthConfig OAuthConfig, clientID string, username string, password string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(username, "username"); err != nil { + return nil, err + } + if err := validateStringParam(password, "password"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalUsernamePasswordSecret{ + Username: username, + Password: password, + }, + callbacks..., + ) +} + +// NewServicePrincipalTokenFromAuthorizationCode creates a ServicePrincipalToken from the +func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clientID string, clientSecret string, authorizationCode string, redirectURI string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(clientSecret, "clientSecret"); err != nil { + return nil, err + } + if err := validateStringParam(authorizationCode, "authorizationCode"); err != nil { + return nil, err + } + if err := validateStringParam(redirectURI, "redirectURI"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalAuthorizationCodeSecret{ + ClientSecret: clientSecret, + AuthorizationCode: authorizationCode, + RedirectURI: redirectURI, + }, + callbacks..., + ) +} + +// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines. +func GetMSIVMEndpoint() (string, error) { + return msiEndpoint, nil +} + +// NOTE: this only indicates if the ASE environment credentials have been set +// which does not necessarily mean that the caller is authenticating via ASE! +func isAppService() bool { + _, asMSIEndpointEnvExists := os.LookupEnv(asMSIEndpointEnv) + _, asMSISecretEnvExists := os.LookupEnv(asMSISecretEnv) + + return asMSIEndpointEnvExists && asMSISecretEnvExists +} + +// GetMSIAppServiceEndpoint get the MSI endpoint for App Service and Functions +func GetMSIAppServiceEndpoint() (string, error) { + asMSIEndpoint, asMSIEndpointEnvExists := os.LookupEnv(asMSIEndpointEnv) + + if asMSIEndpointEnvExists { + return asMSIEndpoint, nil + } + return "", errors.New("MSI endpoint not found") +} + +// GetMSIEndpoint get the appropriate MSI endpoint depending on the runtime environment +func GetMSIEndpoint() (string, error) { + if isAppService() { + return GetMSIAppServiceEndpoint() + } + return GetMSIVMEndpoint() +} + +// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension. +// It will use the system assigned identity when creating the token. +func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, nil, callbacks...) 
+} + +// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension. +// It will use the clientID of specified user assigned identity when creating the token. +func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, nil, callbacks...) +} + +// NewServicePrincipalTokenFromMSIWithIdentityResourceID creates a ServicePrincipalToken via the MSI VM Extension. +// It will use the azure resource id of user assigned identity when creating the token. +func NewServicePrincipalTokenFromMSIWithIdentityResourceID(msiEndpoint, resource string, identityResourceID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, &identityResourceID, callbacks...) +} + +func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, identityResourceID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateStringParam(msiEndpoint, "msiEndpoint"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if userAssignedID != nil { + if err := validateStringParam(*userAssignedID, "userAssignedID"); err != nil { + return nil, err + } + } + if identityResourceID != nil { + if err := validateStringParam(*identityResourceID, "identityResourceID"); err != nil { + return nil, err + } + } + // We set the oauth config token endpoint to be MSI's endpoint + msiEndpointURL, err := url.Parse(msiEndpoint) + if err != nil { + return nil, err + } + + v := url.Values{} + v.Set("resource", resource) + // we only support token API version 2017-09-01 for app services + clientIDParam := "client_id" + if isASEEndpoint(*msiEndpointURL) { + v.Set("api-version", appServiceAPIVersion) + clientIDParam = "clientid" + } else { + v.Set("api-version", msiAPIVersion) + } + if userAssignedID != nil { + v.Set(clientIDParam, *userAssignedID) + } + if identityResourceID != nil { + v.Set("mi_res_id", *identityResourceID) + } + msiEndpointURL.RawQuery = v.Encode() + + spt := &ServicePrincipalToken{ + inner: servicePrincipalToken{ + Token: newToken(), + OauthConfig: OAuthConfig{ + TokenEndpoint: *msiEndpointURL, + }, + Secret: &ServicePrincipalMSISecret{}, + Resource: resource, + AutoRefresh: true, + RefreshWithin: defaultRefresh, + }, + refreshLock: &sync.RWMutex{}, + sender: sender(), + refreshCallbacks: callbacks, + MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts, + } + + if userAssignedID != nil { + spt.inner.ClientID = *userAssignedID + } + + return spt, nil +} + +// internal type that implements TokenRefreshError +type tokenRefreshError struct { + message string + resp *http.Response +} + +// Error implements the error interface which is part of the TokenRefreshError interface. +func (tre tokenRefreshError) Error() string { + return tre.message +} + +// Response implements the TokenRefreshError interface, it returns the raw HTTP response from the refresh operation. 
+func (tre tokenRefreshError) Response() *http.Response { + return tre.resp +} + +func newTokenRefreshError(message string, resp *http.Response) TokenRefreshError { + return tokenRefreshError{message: message, resp: resp} +} + +// EnsureFresh will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (spt *ServicePrincipalToken) EnsureFresh() error { + return spt.EnsureFreshWithContext(context.Background()) +} + +// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (spt *ServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { + // must take the read lock when initially checking the token's expiration + if spt.inner.AutoRefresh && spt.Token().WillExpireIn(spt.inner.RefreshWithin) { + // take the write lock then check again to see if the token was already refreshed + spt.refreshLock.Lock() + defer spt.refreshLock.Unlock() + if spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) { + return spt.refreshInternal(ctx, spt.inner.Resource) + } + } + return nil +} + +// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization +func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error { + if spt.refreshCallbacks != nil { + for _, callback := range spt.refreshCallbacks { + err := callback(spt.inner.Token) + if err != nil { + return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err) + } + } + } + return nil +} + +// Refresh obtains a fresh token for the Service Principal. +// This method is safe for concurrent use. +func (spt *ServicePrincipalToken) Refresh() error { + return spt.RefreshWithContext(context.Background()) +} + +// RefreshWithContext obtains a fresh token for the Service Principal. +// This method is safe for concurrent use. +func (spt *ServicePrincipalToken) RefreshWithContext(ctx context.Context) error { + spt.refreshLock.Lock() + defer spt.refreshLock.Unlock() + return spt.refreshInternal(ctx, spt.inner.Resource) +} + +// RefreshExchange refreshes the token, but for a different resource. +// This method is safe for concurrent use. +func (spt *ServicePrincipalToken) RefreshExchange(resource string) error { + return spt.RefreshExchangeWithContext(context.Background(), resource) +} + +// RefreshExchangeWithContext refreshes the token, but for a different resource. +// This method is safe for concurrent use. 
+func (spt *ServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { + spt.refreshLock.Lock() + defer spt.refreshLock.Unlock() + return spt.refreshInternal(ctx, resource) +} + +func (spt *ServicePrincipalToken) getGrantType() string { + switch spt.inner.Secret.(type) { + case *ServicePrincipalUsernamePasswordSecret: + return OAuthGrantTypeUserPass + case *ServicePrincipalAuthorizationCodeSecret: + return OAuthGrantTypeAuthorizationCode + default: + return OAuthGrantTypeClientCredentials + } +} + +func isIMDS(u url.URL) bool { + return isMSIEndpoint(u) == true || isASEEndpoint(u) == true +} + +func isMSIEndpoint(endpoint url.URL) bool { + msi, err := url.Parse(msiEndpoint) + if err != nil { + return false + } + return endpoint.Host == msi.Host && endpoint.Path == msi.Path +} + +func isASEEndpoint(endpoint url.URL) bool { + aseEndpoint, err := GetMSIAppServiceEndpoint() + if err != nil { + // app service environment isn't enabled + return false + } + ase, err := url.Parse(aseEndpoint) + if err != nil { + return false + } + return endpoint.Host == ase.Host && endpoint.Path == ase.Path +} + +func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error { + if spt.customRefreshFunc != nil { + token, err := spt.customRefreshFunc(ctx, resource) + if err != nil { + return err + } + spt.inner.Token = *token + return spt.InvokeRefreshCallbacks(spt.inner.Token) + } + req, err := http.NewRequest(http.MethodPost, spt.inner.OauthConfig.TokenEndpoint.String(), nil) + if err != nil { + return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err) + } + req.Header.Add("User-Agent", UserAgent()) + // Add header when runtime is on App Service or Functions + if isASEEndpoint(spt.inner.OauthConfig.TokenEndpoint) { + asMSISecret, _ := os.LookupEnv(asMSISecretEnv) + req.Header.Add(secretHeader, asMSISecret) + } + req = req.WithContext(ctx) + if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) { + v := url.Values{} + v.Set("client_id", spt.inner.ClientID) + v.Set("resource", resource) + + if spt.inner.Token.RefreshToken != "" { + v.Set("grant_type", OAuthGrantTypeRefreshToken) + v.Set("refresh_token", spt.inner.Token.RefreshToken) + // web apps must specify client_secret when refreshing tokens + // see https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-protocols-oauth-code#refreshing-the-access-tokens + if spt.getGrantType() == OAuthGrantTypeAuthorizationCode { + err := spt.inner.Secret.SetAuthenticationValues(spt, &v) + if err != nil { + return err + } + } + } else { + v.Set("grant_type", spt.getGrantType()) + err := spt.inner.Secret.SetAuthenticationValues(spt, &v) + if err != nil { + return err + } + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + req.Body = body + } + + if _, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); ok { + req.Method = http.MethodGet + // the metadata header isn't applicable for ASE + if !isASEEndpoint(spt.inner.OauthConfig.TokenEndpoint) { + req.Header.Set(metadataHeader, "true") + } + } + + var resp *http.Response + if isMSIEndpoint(spt.inner.OauthConfig.TokenEndpoint) { + resp, err = getMSIEndpoint(ctx, spt.sender) + if err != nil { + // return a TokenRefreshError here so that we don't keep retrying + return newTokenRefreshError(fmt.Sprintf("the MSI endpoint is not available. 
Failed HTTP request to MSI endpoint: %v", err), nil) + } + resp.Body.Close() + } + if isIMDS(spt.inner.OauthConfig.TokenEndpoint) { + resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts) + } else { + resp, err = spt.sender.Do(req) + } + if err != nil { + // don't return a TokenRefreshError here; this will allow retry logic to apply + return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err) + } + + defer resp.Body.Close() + rb, err := ioutil.ReadAll(resp.Body) + + if resp.StatusCode != http.StatusOK { + if err != nil { + return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body: %v Endpoint %s", resp.StatusCode, err, req.URL.String()), resp) + } + return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s Endpoint %s", resp.StatusCode, string(rb), req.URL.String()), resp) + } + + // for the following error cases don't return a TokenRefreshError. the operation succeeded + // but some transient failure happened during deserialization. by returning a generic error + // the retry logic will kick in (we don't retry on TokenRefreshError). + + if err != nil { + return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return fmt.Errorf("adal: Empty service principal token received during refresh") + } + token := struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + + // AAD returns expires_in as a string, ADFS returns it as an int + ExpiresIn json.Number `json:"expires_in"` + // expires_on can be in two formats, a UTC time stamp or the number of seconds. + ExpiresOn string `json:"expires_on"` + NotBefore json.Number `json:"not_before"` + + Resource string `json:"resource"` + Type string `json:"token_type"` + }{} + // return a TokenRefreshError in the follow error cases as the token is in an unexpected format + err = json.Unmarshal(rb, &token) + if err != nil { + return newTokenRefreshError(fmt.Sprintf("adal: Failed to unmarshal the service principal token during refresh. 
Error = '%v' JSON = '%s'", err, string(rb)), resp) + } + expiresOn := json.Number("") + // ADFS doesn't include the expires_on field + if token.ExpiresOn != "" { + if expiresOn, err = parseExpiresOn(token.ExpiresOn); err != nil { + return newTokenRefreshError(fmt.Sprintf("adal: failed to parse expires_on: %v value '%s'", err, token.ExpiresOn), resp) + } + } + spt.inner.Token.AccessToken = token.AccessToken + spt.inner.Token.RefreshToken = token.RefreshToken + spt.inner.Token.ExpiresIn = token.ExpiresIn + spt.inner.Token.ExpiresOn = expiresOn + spt.inner.Token.NotBefore = token.NotBefore + spt.inner.Token.Resource = token.Resource + spt.inner.Token.Type = token.Type + + return spt.InvokeRefreshCallbacks(spt.inner.Token) +} + +// converts expires_on to the number of seconds +func parseExpiresOn(s string) (json.Number, error) { + // convert the expiration date to the number of seconds from now + timeToDuration := func(t time.Time) json.Number { + dur := t.Sub(time.Now().UTC()) + return json.Number(strconv.FormatInt(int64(dur.Round(time.Second).Seconds()), 10)) + } + if _, err := strconv.ParseInt(s, 10, 64); err == nil { + // this is the number of seconds case, no conversion required + return json.Number(s), nil + } else if eo, err := time.Parse(expiresOnDateFormatPM, s); err == nil { + return timeToDuration(eo), nil + } else if eo, err := time.Parse(expiresOnDateFormat, s); err == nil { + return timeToDuration(eo), nil + } else { + // unknown format + return json.Number(""), err + } +} + +// retry logic specific to retrieving a token from the IMDS endpoint +func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http.Response, err error) { + // copied from client.go due to circular dependency + retries := []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } + // extra retry status codes specific to IMDS + retries = append(retries, + http.StatusNotFound, + http.StatusGone, + // all remaining 5xx + http.StatusNotImplemented, + http.StatusHTTPVersionNotSupported, + http.StatusVariantAlsoNegotiates, + http.StatusInsufficientStorage, + http.StatusLoopDetected, + http.StatusNotExtended, + http.StatusNetworkAuthenticationRequired) + + // see https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/how-to-use-vm-token#retry-guidance + + const maxDelay time.Duration = 60 * time.Second + + attempt := 0 + delay := time.Duration(0) + + // maxAttempts is user-specified, ensure that its value is greater than zero else no request will be made + if maxAttempts < 1 { + maxAttempts = defaultMaxMSIRefreshAttempts + } + + for attempt < maxAttempts { + if resp != nil && resp.Body != nil { + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + } + resp, err = sender.Do(req) + // we want to retry if err is not nil or the status code is in the list of retry codes + if err == nil && !responseHasStatusCode(resp, retries...) { + return + } + + // perform exponential backoff with a cap. + // must increment attempt before calculating delay. 
+ attempt++ + // the base value of 2 is the "delta backoff" as specified in the guidance doc + delay += (time.Duration(math.Pow(2, float64(attempt))) * time.Second) + if delay > maxDelay { + delay = maxDelay + } + + select { + case <-time.After(delay): + // intentionally left blank + case <-req.Context().Done(): + err = req.Context().Err() + return + } + } + return +} + +func responseHasStatusCode(resp *http.Response, codes ...int) bool { + if resp != nil { + for _, i := range codes { + if i == resp.StatusCode { + return true + } + } + } + return false +} + +// SetAutoRefresh enables or disables automatic refreshing of stale tokens. +func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) { + spt.inner.AutoRefresh = autoRefresh +} + +// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will +// refresh the token. +func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { + spt.inner.RefreshWithin = d + return +} + +// SetSender sets the http.Client used when obtaining the Service Principal token. An +// undecorated http.Client is used by default. +func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s } + +// OAuthToken implements the OAuthTokenProvider interface. It returns the current access token. +func (spt *ServicePrincipalToken) OAuthToken() string { + spt.refreshLock.RLock() + defer spt.refreshLock.RUnlock() + return spt.inner.Token.OAuthToken() +} + +// Token returns a copy of the current token. +func (spt *ServicePrincipalToken) Token() Token { + spt.refreshLock.RLock() + defer spt.refreshLock.RUnlock() + return spt.inner.Token +} + +// MultiTenantServicePrincipalToken contains tokens for multi-tenant authorization. +type MultiTenantServicePrincipalToken struct { + PrimaryToken *ServicePrincipalToken + AuxiliaryTokens []*ServicePrincipalToken +} + +// PrimaryOAuthToken returns the primary authorization token. +func (mt *MultiTenantServicePrincipalToken) PrimaryOAuthToken() string { + return mt.PrimaryToken.OAuthToken() +} + +// AuxiliaryOAuthTokens returns one to three auxiliary authorization tokens. +func (mt *MultiTenantServicePrincipalToken) AuxiliaryOAuthTokens() []string { + tokens := make([]string, len(mt.AuxiliaryTokens)) + for i := range mt.AuxiliaryTokens { + tokens[i] = mt.AuxiliaryTokens[i].OAuthToken() + } + return tokens +} + +// NewMultiTenantServicePrincipalToken creates a new MultiTenantServicePrincipalToken with the specified credentials and resource. 
+func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig, clientID string, secret string, resource string) (*MultiTenantServicePrincipalToken, error) { + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(secret, "secret"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + auxTenants := multiTenantCfg.AuxiliaryTenants() + m := MultiTenantServicePrincipalToken{ + AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)), + } + primary, err := NewServicePrincipalToken(*multiTenantCfg.PrimaryTenant(), clientID, secret, resource) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err) + } + m.PrimaryToken = primary + for i := range auxTenants { + aux, err := NewServicePrincipalToken(*auxTenants[i], clientID, secret, resource) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err) + } + m.AuxiliaryTokens[i] = aux + } + return &m, nil +} + +// NewMultiTenantServicePrincipalTokenFromCertificate creates a new MultiTenantServicePrincipalToken with the specified certificate credentials and resource. +func NewMultiTenantServicePrincipalTokenFromCertificate(multiTenantCfg MultiTenantOAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string) (*MultiTenantServicePrincipalToken, error) { + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if certificate == nil { + return nil, fmt.Errorf("parameter 'certificate' cannot be nil") + } + if privateKey == nil { + return nil, fmt.Errorf("parameter 'privateKey' cannot be nil") + } + auxTenants := multiTenantCfg.AuxiliaryTenants() + m := MultiTenantServicePrincipalToken{ + AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)), + } + primary, err := NewServicePrincipalTokenWithSecret( + *multiTenantCfg.PrimaryTenant(), + clientID, + resource, + &ServicePrincipalCertificateSecret{ + PrivateKey: privateKey, + Certificate: certificate, + }, + ) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err) + } + m.PrimaryToken = primary + for i := range auxTenants { + aux, err := NewServicePrincipalTokenWithSecret( + *auxTenants[i], + clientID, + resource, + &ServicePrincipalCertificateSecret{ + PrivateKey: privateKey, + Certificate: certificate, + }, + ) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err) + } + m.AuxiliaryTokens[i] = aux + } + return &m, nil +} + +// MSIAvailable returns true if the MSI endpoint is available for authentication. +func MSIAvailable(ctx context.Context, sender Sender) bool { + resp, err := getMSIEndpoint(ctx, sender) + if err == nil { + resp.Body.Close() + } + return err == nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go new file mode 100644 index 0000000000..54b4defefd --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go @@ -0,0 +1,77 @@ +// +build go1.13 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adal + +import ( + "context" + "fmt" + "net/http" + "time" +) + +func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) { + // this cannot fail, the return sig is due to legacy reasons + msiEndpoint, _ := GetMSIVMEndpoint() + tempCtx, cancel := context.WithTimeout(ctx, 500*time.Millisecond) + defer cancel() + // http.NewRequestWithContext() was added in Go 1.13 + req, _ := http.NewRequestWithContext(tempCtx, http.MethodGet, msiEndpoint, nil) + q := req.URL.Query() + q.Add("api-version", msiAPIVersion) + req.URL.RawQuery = q.Encode() + return sender.Do(req) +} + +// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh primary token: %w", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.EnsureFreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %w", err) + } + } + return nil +} + +// RefreshWithContext obtains a fresh token for the Service Principal. +func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh primary token: %w", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %w", err) + } + } + return nil +} + +// RefreshExchangeWithContext refreshes the token, but for a different resource. +func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { + if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil { + return fmt.Errorf("failed to refresh primary token: %w", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %w", err) + } + } + return nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go new file mode 100644 index 0000000000..6d73bae15e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go @@ -0,0 +1,76 @@ +// +build !go1.13 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package adal + +import ( + "context" + "net/http" + "time" +) + +func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) { + // this cannot fail, the return sig is due to legacy reasons + msiEndpoint, _ := GetMSIVMEndpoint() + tempCtx, cancel := context.WithTimeout(ctx, 500*time.Millisecond) + defer cancel() + req, _ := http.NewRequest(http.MethodGet, msiEndpoint, nil) + req = req.WithContext(tempCtx) + q := req.URL.Query() + q.Add("api-version", msiAPIVersion) + req.URL.RawQuery = q.Encode() + return sender.Do(req) +} + +// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil { + return err + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.EnsureFreshWithContext(ctx); err != nil { + return err + } + } + return nil +} + +// RefreshWithContext obtains a fresh token for the Service Principal. +func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil { + return err + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshWithContext(ctx); err != nil { + return err + } + } + return nil +} + +// RefreshExchangeWithContext refreshes the token, but for a different resource. +func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { + if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil { + return err + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/version.go b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go new file mode 100644 index 0000000000..c867b34843 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go @@ -0,0 +1,45 @@ +package adal + +import ( + "fmt" + "runtime" +) + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const number = "v1.0.0" + +var ( + ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + number, + ) +) + +// UserAgent returns a string containing the Go version, system architecture and OS, and the adal version. 
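+// Editor's note: a minimal, hedged sketch (not part of upstream go-autorest) showing how the
+// user agent string assembled above is typically read and extended; the extension value is a
+// placeholder.
+func exampleExtendUserAgent() string {
+	// AddToUserAgent rejects an empty extension; a non-empty one is appended to the base string.
+	_ = AddToUserAgent("myapp/0.0.1")
+	return UserAgent()
+}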
+func UserAgent() string { + return ua +} + +// AddToUserAgent adds an extension to the current user agent +func AddToUserAgent(extension string) error { + if extension != "" { + ua = fmt.Sprintf("%s %s", ua, extension) + return nil + } + return fmt.Errorf("Extension was empty, User Agent remained as '%s'", ua) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go new file mode 100644 index 0000000000..1226c41115 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go @@ -0,0 +1,353 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "crypto/tls" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/Azure/go-autorest/autorest/adal" +) + +const ( + bearerChallengeHeader = "Www-Authenticate" + bearer = "Bearer" + tenantID = "tenantID" + apiKeyAuthorizerHeader = "Ocp-Apim-Subscription-Key" + bingAPISdkHeader = "X-BingApis-SDK-Client" + golangBingAPISdkHeaderValue = "Go-SDK" + authorization = "Authorization" + basic = "Basic" +) + +// Authorizer is the interface that provides a PrepareDecorator used to supply request +// authorization. Most often, the Authorizer decorator runs last so it has access to the full +// state of the formed HTTP request. +type Authorizer interface { + WithAuthorization() PrepareDecorator +} + +// NullAuthorizer implements a default, "do nothing" Authorizer. +type NullAuthorizer struct{} + +// WithAuthorization returns a PrepareDecorator that does nothing. +func (na NullAuthorizer) WithAuthorization() PrepareDecorator { + return WithNothing() +} + +// APIKeyAuthorizer implements API Key authorization. +type APIKeyAuthorizer struct { + headers map[string]interface{} + queryParameters map[string]interface{} +} + +// NewAPIKeyAuthorizerWithHeaders creates an ApiKeyAuthorizer with headers. +func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer { + return NewAPIKeyAuthorizer(headers, nil) +} + +// NewAPIKeyAuthorizerWithQueryParameters creates an ApiKeyAuthorizer with query parameters. +func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer { + return NewAPIKeyAuthorizer(nil, queryParameters) +} + +// NewAPIKeyAuthorizer creates an ApiKeyAuthorizer with headers. +func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer { + return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters} +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP headers and Query Parameters. +func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters)) + } +} + +// CognitiveServicesAuthorizer implements authorization for Cognitive Services. 
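+// Editor's note: a hedged, illustrative sketch (not part of upstream go-autorest) of using the
+// APIKeyAuthorizer defined above with Prepare; the header name, key value, and URL are
+// placeholders invented for the example.
+func exampleAPIKeyAuthorization() (*http.Request, error) {
+	aka := NewAPIKeyAuthorizerWithHeaders(map[string]interface{}{
+		"X-Example-Key": "not-a-real-key", // hypothetical header
+	})
+	// The returned PrepareDecorator stamps the headers (and/or query parameters) onto the request.
+	return Prepare(&http.Request{},
+		WithBaseURL("https://example.invalid"),
+		aka.WithAuthorization())
+}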
+type CognitiveServicesAuthorizer struct { + subscriptionKey string +} + +// NewCognitiveServicesAuthorizer is +func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer { + return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey} +} + +// WithAuthorization is +func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator { + headers := make(map[string]interface{}) + headers[apiKeyAuthorizerHeader] = csa.subscriptionKey + headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue + + return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() +} + +// BearerAuthorizer implements the bearer authorization +type BearerAuthorizer struct { + tokenProvider adal.OAuthTokenProvider +} + +// NewBearerAuthorizer crates a BearerAuthorizer using the given token provider +func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer { + return &BearerAuthorizer{tokenProvider: tp} +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the token. +// +// By default, the token will be automatically refreshed through the Refresher interface. +func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + // the ordering is important here, prefer RefresherWithContext if available + if refresher, ok := ba.tokenProvider.(adal.RefresherWithContext); ok { + err = refresher.EnsureFreshWithContext(r.Context()) + } else if refresher, ok := ba.tokenProvider.(adal.Refresher); ok { + err = refresher.EnsureFresh() + } + if err != nil { + var resp *http.Response + if tokError, ok := err.(adal.TokenRefreshError); ok { + resp = tokError.Response() + } + return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp, + "Failed to refresh the Token for request to %s", r.URL) + } + return Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken()))) + } + return r, err + }) + } +} + +// TokenProvider returns OAuthTokenProvider so that it can be used for authorization outside the REST. +func (ba *BearerAuthorizer) TokenProvider() adal.OAuthTokenProvider { + return ba.tokenProvider +} + +// BearerAuthorizerCallbackFunc is the authentication callback signature. +type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error) + +// BearerAuthorizerCallback implements bearer authorization via a callback. +type BearerAuthorizerCallback struct { + sender Sender + callback BearerAuthorizerCallbackFunc +} + +// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback +// is invoked when the HTTP request is submitted. +func NewBearerAuthorizerCallback(s Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { + if s == nil { + s = sender(tls.RenegotiateNever) + } + return &BearerAuthorizerCallback{sender: s, callback: callback} +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value +// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback. +// +// By default, the token will be automatically refreshed through the Refresher interface. 
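+// Editor's note: an illustrative sketch (not part of upstream go-autorest) of the BearerAuthorizer
+// flow described above: an adal service principal token acts as the OAuthTokenProvider and is
+// refreshed automatically while the request is being prepared. The URL is a placeholder.
+func exampleBearerAuthorization(spt *adal.ServicePrincipalToken) (*http.Request, error) {
+	ba := NewBearerAuthorizer(spt)
+	return Prepare(&http.Request{},
+		WithBaseURL("https://management.example.invalid"),
+		ba.WithAuthorization())
+}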
+func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + // make a copy of the request and remove the body as it's not + // required and avoids us having to create a copy of it. + rCopy := *r + removeRequestBody(&rCopy) + + resp, err := bacb.sender.Do(&rCopy) + if err != nil { + return r, err + } + DrainResponseBody(resp) + if resp.StatusCode == 401 && hasBearerChallenge(resp.Header) { + bc, err := newBearerChallenge(resp.Header) + if err != nil { + return r, err + } + if bacb.callback != nil { + ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"]) + if err != nil { + return r, err + } + return Prepare(r, ba.WithAuthorization()) + } + } + } + return r, err + }) + } +} + +// returns true if the HTTP response contains a bearer challenge +func hasBearerChallenge(header http.Header) bool { + authHeader := header.Get(bearerChallengeHeader) + if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 { + return false + } + return true +} + +type bearerChallenge struct { + values map[string]string +} + +func newBearerChallenge(header http.Header) (bc bearerChallenge, err error) { + challenge := strings.TrimSpace(header.Get(bearerChallengeHeader)) + trimmedChallenge := challenge[len(bearer)+1:] + + // challenge is a set of key=value pairs that are comma delimited + pairs := strings.Split(trimmedChallenge, ",") + if len(pairs) < 1 { + err = fmt.Errorf("challenge '%s' contains no pairs", challenge) + return bc, err + } + + bc.values = make(map[string]string) + for i := range pairs { + trimmedPair := strings.TrimSpace(pairs[i]) + pair := strings.Split(trimmedPair, "=") + if len(pair) == 2 { + // remove the enclosing quotes + key := strings.Trim(pair[0], "\"") + value := strings.Trim(pair[1], "\"") + + switch key { + case "authorization", "authorization_uri": + // strip the tenant ID from the authorization URL + asURL, err := url.Parse(value) + if err != nil { + return bc, err + } + bc.values[tenantID] = asURL.Path[1:] + default: + bc.values[key] = value + } + } + } + + return bc, err +} + +// EventGridKeyAuthorizer implements authorization for event grid using key authentication. +type EventGridKeyAuthorizer struct { + topicKey string +} + +// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer +// with the specified topic key. +func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer { + return EventGridKeyAuthorizer{topicKey: topicKey} +} + +// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header. +func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator { + headers := map[string]interface{}{ + "aeg-sas-key": egta.topicKey, + } + return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() +} + +// BasicAuthorizer implements basic HTTP authorization by adding the Authorization HTTP header +// with the value "Basic " where is a base64-encoded username:password tuple. +type BasicAuthorizer struct { + userName string + password string +} + +// NewBasicAuthorizer creates a new BasicAuthorizer with the specified username and password. 
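+// Editor's note: a small, hedged sketch (not part of upstream go-autorest) showing the effect of
+// the BasicAuthorizer described above: the Authorization header ends up carrying "Basic " plus the
+// base64-encoded "username:password" tuple. The credentials and URL are placeholders.
+func exampleBasicAuthorization() (*http.Request, error) {
+	ba := NewBasicAuthorizer("user", "password")
+	return Prepare(&http.Request{},
+		WithBaseURL("https://example.invalid"),
+		ba.WithAuthorization())
+}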
+func NewBasicAuthorizer(userName, password string) *BasicAuthorizer { + return &BasicAuthorizer{ + userName: userName, + password: password, + } +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Basic " followed by the base64-encoded username:password tuple. +func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator { + headers := make(map[string]interface{}) + headers[authorization] = basic + " " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", ba.userName, ba.password))) + + return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() +} + +// MultiTenantServicePrincipalTokenAuthorizer provides authentication across tenants. +type MultiTenantServicePrincipalTokenAuthorizer interface { + WithAuthorization() PrepareDecorator +} + +// NewMultiTenantServicePrincipalTokenAuthorizer crates a BearerAuthorizer using the given token provider +func NewMultiTenantServicePrincipalTokenAuthorizer(tp adal.MultitenantOAuthTokenProvider) MultiTenantServicePrincipalTokenAuthorizer { + return NewMultiTenantBearerAuthorizer(tp) +} + +// MultiTenantBearerAuthorizer implements bearer authorization across multiple tenants. +type MultiTenantBearerAuthorizer struct { + tp adal.MultitenantOAuthTokenProvider +} + +// NewMultiTenantBearerAuthorizer creates a MultiTenantBearerAuthorizer using the given token provider. +func NewMultiTenantBearerAuthorizer(tp adal.MultitenantOAuthTokenProvider) *MultiTenantBearerAuthorizer { + return &MultiTenantBearerAuthorizer{tp: tp} +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header using the +// primary token along with the auxiliary authorization header using the auxiliary tokens. +// +// By default, the token will be automatically refreshed through the Refresher interface. +func (mt *MultiTenantBearerAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + if refresher, ok := mt.tp.(adal.RefresherWithContext); ok { + err = refresher.EnsureFreshWithContext(r.Context()) + if err != nil { + var resp *http.Response + if tokError, ok := err.(adal.TokenRefreshError); ok { + resp = tokError.Response() + } + return r, NewErrorWithError(err, "azure.multiTenantSPTAuthorizer", "WithAuthorization", resp, + "Failed to refresh one or more Tokens for request to %s", r.URL) + } + } + r, err = Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", mt.tp.PrimaryOAuthToken()))) + if err != nil { + return r, err + } + auxTokens := mt.tp.AuxiliaryOAuthTokens() + for i := range auxTokens { + auxTokens[i] = fmt.Sprintf("Bearer %s", auxTokens[i]) + } + return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, ", "))) + }) + } +} + +// TokenProvider returns the underlying MultitenantOAuthTokenProvider for this authorizer. 
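+// Editor's note: an illustrative sketch (not part of upstream go-autorest) tying the multi-tenant
+// pieces together: an adal.MultiTenantServicePrincipalToken supplies the primary and auxiliary
+// tokens, and the authorizer above emits the Authorization plus auxiliary authorization headers.
+// The URL is a placeholder.
+func exampleMultiTenantAuthorization(mtSPT *adal.MultiTenantServicePrincipalToken) (*http.Request, error) {
+	authorizer := NewMultiTenantServicePrincipalTokenAuthorizer(mtSPT)
+	return Prepare(&http.Request{},
+		WithBaseURL("https://management.example.invalid"),
+		authorizer.WithAuthorization())
+}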
+func (mt *MultiTenantBearerAuthorizer) TokenProvider() adal.MultitenantOAuthTokenProvider { + return mt.tp +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go b/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go new file mode 100644 index 0000000000..66501493bd --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go @@ -0,0 +1,66 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "net/http" + "strings" +) + +// SASTokenAuthorizer implements an authorization for SAS Token Authentication +// this can be used for interaction with Blob Storage Endpoints +type SASTokenAuthorizer struct { + sasToken string +} + +// NewSASTokenAuthorizer creates a SASTokenAuthorizer using the given credentials +func NewSASTokenAuthorizer(sasToken string) (*SASTokenAuthorizer, error) { + if strings.TrimSpace(sasToken) == "" { + return nil, fmt.Errorf("sasToken cannot be empty") + } + + token := sasToken + if strings.HasPrefix(sasToken, "?") { + token = strings.TrimPrefix(sasToken, "?") + } + + return &SASTokenAuthorizer{ + sasToken: token, + }, nil +} + +// WithAuthorization returns a PrepareDecorator that adds a shared access signature token to the +// URI's query parameters. This can be used for the Blob, Queue, and File Services. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/delegate-access-with-shared-access-signature +func (sas *SASTokenAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + + if r.URL.RawQuery == "" { + r.URL.RawQuery = sas.sasToken + } else if !strings.Contains(r.URL.RawQuery, sas.sasToken) { + r.URL.RawQuery = fmt.Sprintf("%s&%s", r.URL.RawQuery, sas.sasToken) + } + + return Prepare(r) + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go b/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go new file mode 100644 index 0000000000..2af5030a1c --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go @@ -0,0 +1,307 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "sort" + "strings" + "time" +) + +// SharedKeyType defines the enumeration for the various shared key types. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key for details on the shared key types. +type SharedKeyType string + +const ( + // SharedKey is used to authorize against blobs, files and queues services. + SharedKey SharedKeyType = "sharedKey" + + // SharedKeyForTable is used to authorize against the table service. + SharedKeyForTable SharedKeyType = "sharedKeyTable" + + // SharedKeyLite is used to authorize against blobs, files and queues services. It's provided for + // backwards compatibility with API versions before 2009-09-19. Prefer SharedKey instead. + SharedKeyLite SharedKeyType = "sharedKeyLite" + + // SharedKeyLiteForTable is used to authorize against the table service. It's provided for + // backwards compatibility with older table API versions. Prefer SharedKeyForTable instead. + SharedKeyLiteForTable SharedKeyType = "sharedKeyLiteTable" +) + +const ( + headerAccept = "Accept" + headerAcceptCharset = "Accept-Charset" + headerContentEncoding = "Content-Encoding" + headerContentLength = "Content-Length" + headerContentMD5 = "Content-MD5" + headerContentLanguage = "Content-Language" + headerIfModifiedSince = "If-Modified-Since" + headerIfMatch = "If-Match" + headerIfNoneMatch = "If-None-Match" + headerIfUnmodifiedSince = "If-Unmodified-Since" + headerDate = "Date" + headerXMSDate = "X-Ms-Date" + headerXMSVersion = "x-ms-version" + headerRange = "Range" +) + +const storageEmulatorAccountName = "devstoreaccount1" + +// SharedKeyAuthorizer implements an authorization for Shared Key +// this can be used for interaction with Blob, File and Queue Storage Endpoints +type SharedKeyAuthorizer struct { + accountName string + accountKey []byte + keyType SharedKeyType +} + +// NewSharedKeyAuthorizer creates a SharedKeyAuthorizer using the provided credentials and shared key type. +func NewSharedKeyAuthorizer(accountName, accountKey string, keyType SharedKeyType) (*SharedKeyAuthorizer, error) { + key, err := base64.StdEncoding.DecodeString(accountKey) + if err != nil { + return nil, fmt.Errorf("malformed storage account key: %v", err) + } + return &SharedKeyAuthorizer{ + accountName: accountName, + accountKey: key, + keyType: keyType, + }, nil +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is " " followed by the computed key. +// This can be used for the Blob, Queue, and File Services +// +// from: https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key +// You may use Shared Key authorization to authorize a request made against the +// 2009-09-19 version and later of the Blob and Queue services, +// and version 2014-02-14 and later of the File services. 
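+// Editor's note: a hedged sketch (not part of upstream go-autorest) of constructing the
+// SharedKeyAuthorizer described above for a storage account; the account name, key, and URL are
+// placeholders (the key must be base64-encoded, as NewSharedKeyAuthorizer decodes it).
+func exampleSharedKeyAuthorization() (*http.Request, error) {
+	fakeKey := base64.StdEncoding.EncodeToString([]byte("not-a-real-key"))
+	sk, err := NewSharedKeyAuthorizer("examplestorageacct", fakeKey, SharedKey)
+	if err != nil {
+		return nil, err
+	}
+	return Prepare(&http.Request{},
+		WithBaseURL("https://examplestorageacct.blob.core.windows.net/container/blob"),
+		sk.WithAuthorization())
+}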
+func (sk *SharedKeyAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + + sk, err := buildSharedKey(sk.accountName, sk.accountKey, r, sk.keyType) + if err != nil { + return r, err + } + return Prepare(r, WithHeader(headerAuthorization, sk)) + }) + } +} + +func buildSharedKey(accName string, accKey []byte, req *http.Request, keyType SharedKeyType) (string, error) { + canRes, err := buildCanonicalizedResource(accName, req.URL.String(), keyType) + if err != nil { + return "", err + } + + if req.Header == nil { + req.Header = http.Header{} + } + + // ensure date is set + if req.Header.Get(headerDate) == "" && req.Header.Get(headerXMSDate) == "" { + date := time.Now().UTC().Format(http.TimeFormat) + req.Header.Set(headerXMSDate, date) + } + canString, err := buildCanonicalizedString(req.Method, req.Header, canRes, keyType) + if err != nil { + return "", err + } + return createAuthorizationHeader(accName, accKey, canString, keyType), nil +} + +func buildCanonicalizedResource(accountName, uri string, keyType SharedKeyType) (string, error) { + errMsg := "buildCanonicalizedResource error: %s" + u, err := url.Parse(uri) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + cr := bytes.NewBufferString("") + if accountName != storageEmulatorAccountName { + cr.WriteString("/") + cr.WriteString(getCanonicalizedAccountName(accountName)) + } + + if len(u.Path) > 0 { + // Any portion of the CanonicalizedResource string that is derived from + // the resource's URI should be encoded exactly as it is in the URI. + // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx + cr.WriteString(u.EscapedPath()) + } else { + // a slash is required to indicate the root path + cr.WriteString("/") + } + + params, err := url.ParseQuery(u.RawQuery) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + // See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277 + if keyType == SharedKey { + if len(params) > 0 { + cr.WriteString("\n") + + keys := []string{} + for key := range params { + keys = append(keys, key) + } + sort.Strings(keys) + + completeParams := []string{} + for _, key := range keys { + if len(params[key]) > 1 { + sort.Strings(params[key]) + } + + completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ","))) + } + cr.WriteString(strings.Join(completeParams, "\n")) + } + } else { + // search for "comp" parameter, if exists then add it to canonicalizedresource + if v, ok := params["comp"]; ok { + cr.WriteString("?comp=" + v[0]) + } + } + + return string(cr.Bytes()), nil +} + +func getCanonicalizedAccountName(accountName string) string { + // since we may be trying to access a secondary storage account, we need to + // remove the -secondary part of the storage name + return strings.TrimSuffix(accountName, "-secondary") +} + +func buildCanonicalizedString(verb string, headers http.Header, canonicalizedResource string, keyType SharedKeyType) (string, error) { + contentLength := headers.Get(headerContentLength) + if contentLength == "0" { + contentLength = "" + } + date := headers.Get(headerDate) + if v := headers.Get(headerXMSDate); v != "" { + if keyType == SharedKey || keyType == SharedKeyLite { + date = "" + } else { + date = v + } + } + var canString string + switch keyType { + case SharedKey: + canString = 
strings.Join([]string{ + verb, + headers.Get(headerContentEncoding), + headers.Get(headerContentLanguage), + contentLength, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + date, + headers.Get(headerIfModifiedSince), + headers.Get(headerIfMatch), + headers.Get(headerIfNoneMatch), + headers.Get(headerIfUnmodifiedSince), + headers.Get(headerRange), + buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + case SharedKeyForTable: + canString = strings.Join([]string{ + verb, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + date, + canonicalizedResource, + }, "\n") + case SharedKeyLite: + canString = strings.Join([]string{ + verb, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + date, + buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + case SharedKeyLiteForTable: + canString = strings.Join([]string{ + date, + canonicalizedResource, + }, "\n") + default: + return "", fmt.Errorf("key type '%s' is not supported", keyType) + } + return canString, nil +} + +func buildCanonicalizedHeader(headers http.Header) string { + cm := make(map[string]string) + + for k := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + if strings.HasPrefix(headerName, "x-ms-") { + cm[headerName] = headers.Get(k) + } + } + + if len(cm) == 0 { + return "" + } + + keys := []string{} + for key := range cm { + keys = append(keys, key) + } + + sort.Strings(keys) + + ch := bytes.NewBufferString("") + + for _, key := range keys { + ch.WriteString(key) + ch.WriteRune(':') + ch.WriteString(cm[key]) + ch.WriteRune('\n') + } + + return strings.TrimSuffix(string(ch.Bytes()), "\n") +} + +func createAuthorizationHeader(accountName string, accountKey []byte, canonicalizedString string, keyType SharedKeyType) string { + h := hmac.New(sha256.New, accountKey) + h.Write([]byte(canonicalizedString)) + signature := base64.StdEncoding.EncodeToString(h.Sum(nil)) + var key string + switch keyType { + case SharedKey, SharedKeyForTable: + key = "SharedKey" + case SharedKeyLite, SharedKeyLiteForTable: + key = "SharedKeyLite" + } + return fmt.Sprintf("%s %s:%s", key, getCanonicalizedAccountName(accountName), signature) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go new file mode 100644 index 0000000000..aafdf021fd --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/autorest.go @@ -0,0 +1,150 @@ +/* +Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines +and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) +generated Go code. + +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. A typical pattern is: + + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) + + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) + + err = Respond(resp, + ByDiscardingBody(), + ByClosing()) + +Each phase relies on decorators to modify and / or manage processing. Decorators may first modify +and then pass the data along, pass the data first and then modify the result, or wrap themselves +around passing the data (such as a logger might do). Decorators run in the order provided. 
For +example, the following: + + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) + +will set the URL to: + + https://microsoft.com/a/b/c + +Preparers and Responders may be shared and re-used (assuming the underlying decorators support +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, +all bound together by means of input / output channels. + +Decorators hold their passed state within a closure (such as the path components in the example +above). Be careful to share Preparers and Responders only in a context where such held state +applies. For example, it may not make sense to share a Preparer that applies a query string from a +fixed set of values. Similarly, sharing a Responder that reads the response body into a passed +struct (e.g., ByUnmarshallingJson) is likely incorrect. + +Lastly, the Swagger specification (https://swagger.io) that drives AutoRest +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure +correct parsing and formatting. + +Errors raised by autorest objects and methods will conform to the autorest.Error interface. + +See the included examples for more detail. For details on the suggested use of this package by +generated clients, see the Client described below. +*/ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "net/http" + "time" +) + +const ( + // HeaderLocation specifies the HTTP Location header. + HeaderLocation = "Location" + + // HeaderRetryAfter specifies the HTTP Retry-After header. + HeaderRetryAfter = "Retry-After" +) + +// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set +// and false otherwise. +func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { + if resp == nil { + return false + } + return containsInt(codes, resp.StatusCode) +} + +// GetLocation retrieves the URL from the Location header of the passed response. +func GetLocation(resp *http.Response) string { + return resp.Header.Get(HeaderLocation) +} + +// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If +// the header is absent or is malformed, it will return the supplied default delay time.Duration. +func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration { + retry := resp.Header.Get(HeaderRetryAfter) + if retry == "" { + return defaultDelay + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + return defaultDelay + } + + return d +} + +// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. 
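+// Editor's note: a brief, hedged sketch (not part of upstream go-autorest) of the helpers above:
+// check the response status code and derive a polling delay from Retry-After, falling back to a
+// caller-supplied default when the header is absent or malformed.
+func examplePollingDelay(resp *http.Response) (time.Duration, bool) {
+	if !ResponseHasStatusCode(resp, http.StatusAccepted, http.StatusCreated) {
+		return 0, false
+	}
+	return GetRetryAfter(resp, 30*time.Second), true
+}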
+func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare(&http.Request{Cancel: cancel}, + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} + +// NewPollingRequestWithContext allocates and returns a new http.Request with the specified context to poll for the passed response. +func NewPollingRequestWithContext(ctx context.Context, resp *http.Response) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequestWithContext", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare((&http.Request{}).WithContext(ctx), + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequestWithContext", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go new file mode 100644 index 0000000000..42e28cf2e4 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -0,0 +1,991 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/tracing" +) + +const ( + headerAsyncOperation = "Azure-AsyncOperation" +) + +const ( + operationInProgress string = "InProgress" + operationCanceled string = "Canceled" + operationFailed string = "Failed" + operationSucceeded string = "Succeeded" +) + +var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK} + +// FutureAPI contains the set of methods on the Future type. +type FutureAPI interface { + // Response returns the last HTTP response. + Response() *http.Response + + // Status returns the last status message of the operation. + Status() string + + // PollingMethod returns the method used to monitor the status of the asynchronous operation. + PollingMethod() PollingMethodType + + // DoneWithContext queries the service to see if the operation has completed. + DoneWithContext(context.Context, autorest.Sender) (bool, error) + + // GetPollingDelay returns a duration the application should wait before checking + // the status of the asynchronous request and true; this value is returned from + // the service via the Retry-After response header. 
If the header wasn't returned + // then the function returns the zero-value time.Duration and false. + GetPollingDelay() (time.Duration, bool) + + // WaitForCompletionRef will return when one of the following conditions is met: the long + // running operation has completed, the provided context is cancelled, or the client's + // polling duration has been exceeded. It will retry failed polling attempts based on + // the retry value defined in the client up to the maximum retry attempts. + // If no deadline is specified in the context then the client.PollingDuration will be + // used to determine if a default deadline should be used. + // If PollingDuration is greater than zero the value will be used as the context's timeout. + // If PollingDuration is zero then no default deadline will be used. + WaitForCompletionRef(context.Context, autorest.Client) error + + // MarshalJSON implements the json.Marshaler interface. + MarshalJSON() ([]byte, error) + + // MarshalJSON implements the json.Unmarshaler interface. + UnmarshalJSON([]byte) error + + // PollingURL returns the URL used for retrieving the status of the long-running operation. + PollingURL() string + + // GetResult should be called once polling has completed successfully. + // It makes the final GET call to retrieve the resultant payload. + GetResult(autorest.Sender) (*http.Response, error) +} + +var _ FutureAPI = (*Future)(nil) + +// Future provides a mechanism to access the status and results of an asynchronous request. +// Since futures are stateful they should be passed by value to avoid race conditions. +type Future struct { + pt pollingTracker +} + +// NewFutureFromResponse returns a new Future object initialized +// with the initial response from an asynchronous operation. +func NewFutureFromResponse(resp *http.Response) (Future, error) { + pt, err := createPollingTracker(resp) + return Future{pt: pt}, err +} + +// Response returns the last HTTP response. +func (f Future) Response() *http.Response { + if f.pt == nil { + return nil + } + return f.pt.latestResponse() +} + +// Status returns the last status message of the operation. +func (f Future) Status() string { + if f.pt == nil { + return "" + } + return f.pt.pollingStatus() +} + +// PollingMethod returns the method used to monitor the status of the asynchronous operation. +func (f Future) PollingMethod() PollingMethodType { + if f.pt == nil { + return PollingUnknown + } + return f.pt.pollingMethod() +} + +// DoneWithContext queries the service to see if the operation has completed. 
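+// Editor's note: an illustrative, simplified polling loop (not part of upstream go-autorest) for
+// the Future type above; real callers normally use WaitForCompletionRef, which also honours the
+// client's retry and polling settings. The 10-second fallback delay is arbitrary.
+func examplePollFuture(ctx context.Context, sender autorest.Sender, f *Future) error {
+	for {
+		done, err := f.DoneWithContext(ctx, sender)
+		if err != nil {
+			return err
+		}
+		if done {
+			return nil
+		}
+		// Prefer the service-provided Retry-After delay when present.
+		delay, ok := f.GetPollingDelay()
+		if !ok {
+			delay = 10 * time.Second
+		}
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-time.After(delay):
+		}
+	}
+}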
+func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) { + ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext") + defer func() { + sc := -1 + resp := f.Response() + if resp != nil { + sc = resp.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + + if f.pt == nil { + return false, autorest.NewError("Future", "Done", "future is not initialized") + } + if f.pt.hasTerminated() { + return true, f.pt.pollingError() + } + if err := f.pt.pollForStatus(ctx, sender); err != nil { + return false, err + } + if err := f.pt.checkForErrors(); err != nil { + return f.pt.hasTerminated(), err + } + if err := f.pt.updatePollingState(f.pt.provisioningStateApplicable()); err != nil { + return false, err + } + if err := f.pt.initPollingMethod(); err != nil { + return false, err + } + if err := f.pt.updatePollingMethod(); err != nil { + return false, err + } + return f.pt.hasTerminated(), f.pt.pollingError() +} + +// GetPollingDelay returns a duration the application should wait before checking +// the status of the asynchronous request and true; this value is returned from +// the service via the Retry-After response header. If the header wasn't returned +// then the function returns the zero-value time.Duration and false. +func (f Future) GetPollingDelay() (time.Duration, bool) { + if f.pt == nil { + return 0, false + } + resp := f.pt.latestResponse() + if resp == nil { + return 0, false + } + + retry := resp.Header.Get(autorest.HeaderRetryAfter) + if retry == "" { + return 0, false + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + panic(err) + } + + return d, true +} + +// WaitForCompletionRef will return when one of the following conditions is met: the long +// running operation has completed, the provided context is cancelled, or the client's +// polling duration has been exceeded. It will retry failed polling attempts based on +// the retry value defined in the client up to the maximum retry attempts. +// If no deadline is specified in the context then the client.PollingDuration will be +// used to determine if a default deadline should be used. +// If PollingDuration is greater than zero the value will be used as the context's timeout. +// If PollingDuration is zero then no default deadline will be used. 
+func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) (err error) { + ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.WaitForCompletionRef") + defer func() { + sc := -1 + resp := f.Response() + if resp != nil { + sc = resp.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + cancelCtx := ctx + // if the provided context already has a deadline don't override it + _, hasDeadline := ctx.Deadline() + if d := client.PollingDuration; !hasDeadline && d != 0 { + var cancel context.CancelFunc + cancelCtx, cancel = context.WithTimeout(ctx, d) + defer cancel() + } + // if the initial response has a Retry-After, sleep for the specified amount of time before starting to poll + if delay, ok := f.GetPollingDelay(); ok { + if delayElapsed := autorest.DelayForBackoff(delay, 0, cancelCtx.Done()); !delayElapsed { + err = cancelCtx.Err() + return + } + } + done, err := f.DoneWithContext(ctx, client) + for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) { + if attempts >= client.RetryAttempts { + return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded") + } + // we want delayAttempt to be zero in the non-error case so + // that DelayForBackoff doesn't perform exponential back-off + var delayAttempt int + var delay time.Duration + if err == nil { + // check for Retry-After delay, if not present use the client's polling delay + var ok bool + delay, ok = f.GetPollingDelay() + if !ok { + delay = client.PollingDelay + } + } else { + // there was an error polling for status so perform exponential + // back-off based on the number of attempts using the client's retry + // duration. update attempts after delayAttempt to avoid off-by-one. + delayAttempt = attempts + delay = client.RetryDuration + attempts++ + } + // wait until the delay elapses or the context is cancelled + delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, cancelCtx.Done()) + if !delayElapsed { + return autorest.NewErrorWithError(cancelCtx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled") + } + } + return +} + +// MarshalJSON implements the json.Marshaler interface. +func (f Future) MarshalJSON() ([]byte, error) { + return json.Marshal(f.pt) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (f *Future) UnmarshalJSON(data []byte) error { + // unmarshal into JSON object to determine the tracker type + obj := map[string]interface{}{} + err := json.Unmarshal(data, &obj) + if err != nil { + return err + } + if obj["method"] == nil { + return autorest.NewError("Future", "UnmarshalJSON", "missing 'method' property") + } + method := obj["method"].(string) + switch strings.ToUpper(method) { + case http.MethodDelete: + f.pt = &pollingTrackerDelete{} + case http.MethodPatch: + f.pt = &pollingTrackerPatch{} + case http.MethodPost: + f.pt = &pollingTrackerPost{} + case http.MethodPut: + f.pt = &pollingTrackerPut{} + default: + return autorest.NewError("Future", "UnmarshalJSON", "unsupoorted method '%s'", method) + } + // now unmarshal into the tracker + return json.Unmarshal(data, &f.pt) +} + +// PollingURL returns the URL used for retrieving the status of the long-running operation. +func (f Future) PollingURL() string { + if f.pt == nil { + return "" + } + return f.pt.pollingURL() +} + +// GetResult should be called once polling has completed successfully. +// It makes the final GET call to retrieve the resultant payload. 
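+// Editor's note: an illustrative end-to-end sketch (not part of upstream go-autorest) of the
+// typical Future lifecycle: wrap the initial long-running-operation response, wait for completion,
+// then fetch the final payload with GetResult.
+func exampleWaitForResult(ctx context.Context, client autorest.Client, initial *http.Response) (*http.Response, error) {
+	future, err := NewFutureFromResponse(initial)
+	if err != nil {
+		return nil, err
+	}
+	if err := future.WaitForCompletionRef(ctx, client); err != nil {
+		return nil, err
+	}
+	// autorest.Client satisfies autorest.Sender, so it can perform the final GET.
+	return future.GetResult(client)
+}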
+func (f Future) GetResult(sender autorest.Sender) (*http.Response, error) { + if f.pt.finalGetURL() == "" { + // we can end up in this situation if the async operation returns a 200 + // with no polling URLs. in that case return the response which should + // contain the JSON payload (only do this for successful terminal cases). + if lr := f.pt.latestResponse(); lr != nil && f.pt.hasSucceeded() { + return lr, nil + } + return nil, autorest.NewError("Future", "GetResult", "missing URL for retrieving result") + } + req, err := http.NewRequest(http.MethodGet, f.pt.finalGetURL(), nil) + if err != nil { + return nil, err + } + resp, err := sender.Do(req) + if err == nil && resp.Body != nil { + // copy the body and close it so callers don't have to + defer resp.Body.Close() + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return resp, err + } + resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + return resp, err +} + +type pollingTracker interface { + // these methods can differ per tracker + + // checks the response headers and status code to determine the polling mechanism + updatePollingMethod() error + + // checks the response for tracker-specific error conditions + checkForErrors() error + + // returns true if provisioning state should be checked + provisioningStateApplicable() bool + + // methods common to all trackers + + // initializes a tracker's polling URL and method, called for each iteration. + // these values can be overridden by each polling tracker as required. + initPollingMethod() error + + // initializes the tracker's internal state, call this when the tracker is created + initializeState() error + + // makes an HTTP request to check the status of the LRO + pollForStatus(ctx context.Context, sender autorest.Sender) error + + // updates internal tracker state, call this after each call to pollForStatus + updatePollingState(provStateApl bool) error + + // returns the error response from the service, can be nil + pollingError() error + + // returns the polling method being used + pollingMethod() PollingMethodType + + // returns the state of the LRO as returned from the service + pollingStatus() string + + // returns the URL used for polling status + pollingURL() string + + // returns the URL used for the final GET to retrieve the resource + finalGetURL() string + + // returns true if the LRO is in a terminal state + hasTerminated() bool + + // returns true if the LRO is in a failed terminal state + hasFailed() bool + + // returns true if the LRO is in a successful terminal state + hasSucceeded() bool + + // returns the cached HTTP response after a call to pollForStatus(), can be nil + latestResponse() *http.Response +} + +type pollingTrackerBase struct { + // resp is the last response, either from the submission of the LRO or from polling + resp *http.Response + + // method is the HTTP verb, this is needed for deserialization + Method string `json:"method"` + + // rawBody is the raw JSON response body + rawBody map[string]interface{} + + // denotes if polling is using async-operation or location header + Pm PollingMethodType `json:"pollingMethod"` + + // the URL to poll for status + URI string `json:"pollingURI"` + + // the state of the LRO as returned from the service + State string `json:"lroState"` + + // the URL to GET for the final result + FinalGetURI string `json:"resultURI"` + + // used to hold an error object returned from the service + Err *ServiceError `json:"error,omitempty"` +} + +func (pt *pollingTrackerBase) initializeState() error { + // determine the 
initial polling state based on response body and/or HTTP status + // code. this is applicable to the initial LRO response, not polling responses! + pt.Method = pt.resp.Request.Method + if err := pt.updateRawBody(); err != nil { + return err + } + switch pt.resp.StatusCode { + case http.StatusOK: + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + if pt.hasFailed() { + pt.updateErrorFromResponse() + return pt.pollingError() + } + } else { + pt.State = operationSucceeded + } + case http.StatusCreated: + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + } else { + pt.State = operationInProgress + } + case http.StatusAccepted: + pt.State = operationInProgress + case http.StatusNoContent: + pt.State = operationSucceeded + default: + pt.State = operationFailed + pt.updateErrorFromResponse() + return pt.pollingError() + } + return pt.initPollingMethod() +} + +func (pt pollingTrackerBase) getProvisioningState() *string { + if pt.rawBody != nil && pt.rawBody["properties"] != nil { + p := pt.rawBody["properties"].(map[string]interface{}) + if ps := p["provisioningState"]; ps != nil { + s := ps.(string) + return &s + } + } + return nil +} + +func (pt *pollingTrackerBase) updateRawBody() error { + pt.rawBody = map[string]interface{}{} + if pt.resp.ContentLength != 0 { + defer pt.resp.Body.Close() + b, err := ioutil.ReadAll(pt.resp.Body) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body") + } + // put the body back so it's available to other callers + pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + // observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty + if len(b) == 0 { + return nil + } + if err = json.Unmarshal(b, &pt.rawBody); err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to unmarshal response body") + } + } + return nil +} + +func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest.Sender) error { + req, err := http.NewRequest(http.MethodGet, pt.URI, nil) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request") + } + + req = req.WithContext(ctx) + preparer := autorest.CreatePreparer(autorest.GetPrepareDecorators(ctx)...) + req, err = preparer.Prepare(req) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed preparing HTTP request") + } + pt.resp, err = sender.Do(req) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request") + } + if autorest.ResponseHasStatusCode(pt.resp, pollingCodes[:]...) { + // reset the service error on success case + pt.Err = nil + err = pt.updateRawBody() + } else { + // check response body for error content + pt.updateErrorFromResponse() + err = pt.pollingError() + } + return err +} + +// attempts to unmarshal a ServiceError type from the response body. +// if that fails then make a best attempt at creating something meaningful. +// NOTE: this assumes that the async operation has failed. 
+func (pt *pollingTrackerBase) updateErrorFromResponse() { + var err error + if pt.resp.ContentLength != 0 { + type respErr struct { + ServiceError *ServiceError `json:"error"` + } + re := respErr{} + defer pt.resp.Body.Close() + var b []byte + if b, err = ioutil.ReadAll(pt.resp.Body); err != nil { + goto Default + } + // put the body back so it's available to other callers + pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + if len(b) == 0 { + goto Default + } + if err = json.Unmarshal(b, &re); err != nil { + goto Default + } + // unmarshalling the error didn't yield anything, try unwrapped error + if re.ServiceError == nil { + err = json.Unmarshal(b, &re.ServiceError) + if err != nil { + goto Default + } + } + // the unmarshaller will ensure re.ServiceError is non-nil + // even if there was no content unmarshalled so check the code. + if re.ServiceError.Code != "" { + pt.Err = re.ServiceError + return + } + } +Default: + se := &ServiceError{ + Code: pt.pollingStatus(), + Message: "The async operation failed.", + } + if err != nil { + se.InnerError = make(map[string]interface{}) + se.InnerError["unmarshalError"] = err.Error() + } + // stick the response body into the error object in hopes + // it contains something useful to help diagnose the failure. + if len(pt.rawBody) > 0 { + se.AdditionalInfo = []map[string]interface{}{ + pt.rawBody, + } + } + pt.Err = se +} + +func (pt *pollingTrackerBase) updatePollingState(provStateApl bool) error { + if pt.Pm == PollingAsyncOperation && pt.rawBody["status"] != nil { + pt.State = pt.rawBody["status"].(string) + } else { + if pt.resp.StatusCode == http.StatusAccepted { + pt.State = operationInProgress + } else if provStateApl { + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + } else { + pt.State = operationSucceeded + } + } else { + return autorest.NewError("pollingTrackerBase", "updatePollingState", "the response from the async operation has an invalid status code") + } + } + // if the operation has failed update the error state + if pt.hasFailed() { + pt.updateErrorFromResponse() + } + return nil +} + +func (pt pollingTrackerBase) pollingError() error { + if pt.Err == nil { + return nil + } + return pt.Err +} + +func (pt pollingTrackerBase) pollingMethod() PollingMethodType { + return pt.Pm +} + +func (pt pollingTrackerBase) pollingStatus() string { + return pt.State +} + +func (pt pollingTrackerBase) pollingURL() string { + return pt.URI +} + +func (pt pollingTrackerBase) finalGetURL() string { + return pt.FinalGetURI +} + +func (pt pollingTrackerBase) hasTerminated() bool { + return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) || strings.EqualFold(pt.State, operationSucceeded) +} + +func (pt pollingTrackerBase) hasFailed() bool { + return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) +} + +func (pt pollingTrackerBase) hasSucceeded() bool { + return strings.EqualFold(pt.State, operationSucceeded) +} + +func (pt pollingTrackerBase) latestResponse() *http.Response { + return pt.resp +} + +// error checking common to all trackers +func (pt pollingTrackerBase) baseCheckForErrors() error { + // for Azure-AsyncOperations the response body cannot be nil or empty + if pt.Pm == PollingAsyncOperation { + if pt.resp.Body == nil || pt.resp.ContentLength == 0 { + return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "for Azure-AsyncOperation response body cannot be nil") + } + if pt.rawBody["status"] == nil { + return 
autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "missing status property in Azure-AsyncOperation response body") + } + } + return nil +} + +// default initialization of polling URL/method. each verb tracker will update this as required. +func (pt *pollingTrackerBase) initPollingMethod() error { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + return nil + } + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh != "" { + pt.URI = lh + pt.Pm = PollingLocation + return nil + } + // it's ok if we didn't find a polling header, this will be handled elsewhere + return nil +} + +// DELETE + +type pollingTrackerDelete struct { + pollingTrackerBase +} + +func (pt *pollingTrackerDelete) updatePollingMethod() error { + // for 201 the Location header is required + if pt.resp.StatusCode == http.StatusCreated { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh == "" { + return autorest.NewError("pollingTrackerDelete", "updateHeaders", "missing Location header in 201 response") + } else { + pt.URI = lh + } + pt.Pm = PollingLocation + pt.FinalGetURI = pt.URI + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. + if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation + } + // when both headers are returned we use the value in the Location header for the final GET + pt.FinalGetURI = lh + } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } + } + return nil +} + +func (pt pollingTrackerDelete) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerDelete) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent +} + +// PATCH + +type pollingTrackerPatch struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPatch) updatePollingMethod() error { + // by default we can use the original URL for polling and final GET + if pt.URI == "" { + pt.URI = pt.resp.Request.URL.String() + } + if pt.FinalGetURI == "" { + pt.FinalGetURI = pt.resp.Request.URL.String() + } + if pt.Pm == PollingUnknown { + pt.Pm = PollingRequestURI + } + // for 201 it's permissible for no headers to be returned + if pt.resp.StatusCode == http.StatusCreated { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + // note the absence of the "final GET" mechanism for PATCH + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + if ao == "" { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + 
return err + } else if lh == "" { + return autorest.NewError("pollingTrackerPatch", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } else { + pt.URI = lh + pt.Pm = PollingLocation + } + } + } + return nil +} + +func (pt pollingTrackerPatch) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerPatch) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated +} + +// POST + +type pollingTrackerPost struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPost) updatePollingMethod() error { + // 201 requires Location header + if pt.resp.StatusCode == http.StatusCreated { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "missing Location header in 201 response") + } else { + pt.URI = lh + pt.FinalGetURI = lh + pt.Pm = PollingLocation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. + if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation + } + // when both headers are returned we use the value in the Location header for the final GET + pt.FinalGetURI = lh + } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } + } + return nil +} + +func (pt pollingTrackerPost) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerPost) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent +} + +// PUT + +type pollingTrackerPut struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPut) updatePollingMethod() error { + // by default we can use the original URL for polling and final GET + if pt.URI == "" { + pt.URI = pt.resp.Request.URL.String() + } + if pt.FinalGetURI == "" { + pt.FinalGetURI = pt.resp.Request.URL.String() + } + if pt.Pm == PollingUnknown { + pt.Pm = PollingRequestURI + } + // for 201 it's permissible for no headers to be returned + if pt.resp.StatusCode == http.StatusCreated { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. 
+ if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation + } + } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerPut", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } + } + return nil +} + +func (pt pollingTrackerPut) checkForErrors() error { + err := pt.baseCheckForErrors() + if err != nil { + return err + } + // if there are no LRO headers then the body cannot be empty + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } + lh, err := getURLFromLocationHeader(pt.resp) + if err != nil { + return err + } + if ao == "" && lh == "" && len(pt.rawBody) == 0 { + return autorest.NewError("pollingTrackerPut", "checkForErrors", "the response did not contain a body") + } + return nil +} + +func (pt pollingTrackerPut) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated +} + +// creates a polling tracker based on the verb of the original request +func createPollingTracker(resp *http.Response) (pollingTracker, error) { + var pt pollingTracker + switch strings.ToUpper(resp.Request.Method) { + case http.MethodDelete: + pt = &pollingTrackerDelete{pollingTrackerBase: pollingTrackerBase{resp: resp}} + case http.MethodPatch: + pt = &pollingTrackerPatch{pollingTrackerBase: pollingTrackerBase{resp: resp}} + case http.MethodPost: + pt = &pollingTrackerPost{pollingTrackerBase: pollingTrackerBase{resp: resp}} + case http.MethodPut: + pt = &pollingTrackerPut{pollingTrackerBase: pollingTrackerBase{resp: resp}} + default: + return nil, autorest.NewError("azure", "createPollingTracker", "unsupported HTTP method %s", resp.Request.Method) + } + if err := pt.initializeState(); err != nil { + return pt, err + } + // this initializes the polling header values, we do this during creation in case the + // initial response send us invalid values; this way the API call will return a non-nil + // error (not doing this means the error shows up in Future.Done) + return pt, pt.updatePollingMethod() +} + +// gets the polling URL from the Azure-AsyncOperation header. +// ensures the URL is well-formed and absolute. +func getURLFromAsyncOpHeader(resp *http.Response) (string, error) { + s := resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation)) + if s == "" { + return "", nil + } + if !isValidURL(s) { + return "", autorest.NewError("azure", "getURLFromAsyncOpHeader", "invalid polling URL '%s'", s) + } + return s, nil +} + +// gets the polling URL from the Location header. +// ensures the URL is well-formed and absolute. +func getURLFromLocationHeader(resp *http.Response) (string, error) { + s := resp.Header.Get(http.CanonicalHeaderKey(autorest.HeaderLocation)) + if s == "" { + return "", nil + } + if !isValidURL(s) { + return "", autorest.NewError("azure", "getURLFromLocationHeader", "invalid polling URL '%s'", s) + } + return s, nil +} + +// verify that the URL is valid and absolute +func isValidURL(s string) bool { + u, err := url.Parse(s) + return err == nil && u.IsAbs() +} + +// PollingMethodType defines a type used for enumerating polling mechanisms. +type PollingMethodType string + +const ( + // PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header. + PollingAsyncOperation PollingMethodType = "AsyncOperation" + + // PollingLocation indicates the polling method uses the Location header. 
+ PollingLocation PollingMethodType = "Location" + + // PollingRequestURI indicates the polling method uses the original request URI. + PollingRequestURI PollingMethodType = "RequestURI" + + // PollingUnknown indicates an unknown polling method and is the default value. + PollingUnknown PollingMethodType = "" +) + +// AsyncOpIncompleteError is the type that's returned from a future that has not completed. +type AsyncOpIncompleteError struct { + // FutureType is the name of the type composed of a azure.Future. + FutureType string +} + +// Error returns an error message including the originating type name of the error. +func (e AsyncOpIncompleteError) Error() string { + return fmt.Sprintf("%s: asynchronous operation has not completed", e.FutureType) +} + +// NewAsyncOpIncompleteError creates a new AsyncOpIncompleteError with the specified parameters. +func NewAsyncOpIncompleteError(futureType string) AsyncOpIncompleteError { + return AsyncOpIncompleteError{ + FutureType: futureType, + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go new file mode 100644 index 0000000000..0ded76bc6f --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go @@ -0,0 +1,388 @@ +// Package azure provides Azure-specific implementations used with AutoRest. +// See the included examples for more detail. +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "regexp" + "strconv" + "strings" + + "github.com/Azure/go-autorest/autorest" +) + +const ( + // HeaderClientID is the Azure extension header to set a user-specified request ID. + HeaderClientID = "x-ms-client-request-id" + + // HeaderReturnClientID is the Azure extension header to set if the user-specified request ID + // should be included in the response. + HeaderReturnClientID = "x-ms-return-client-request-id" + + // HeaderContentType is the type of the content in the HTTP response. + HeaderContentType = "Content-Type" + + // HeaderRequestID is the Azure extension header of the service generated request ID returned + // in the response. + HeaderRequestID = "x-ms-request-id" +) + +// ServiceError encapsulates the error response from an Azure service. +// It adhears to the OData v4 specification for error responses. 
+type ServiceError struct { + Code string `json:"code"` + Message string `json:"message"` + Target *string `json:"target"` + Details []map[string]interface{} `json:"details"` + InnerError map[string]interface{} `json:"innererror"` + AdditionalInfo []map[string]interface{} `json:"additionalInfo"` +} + +func (se ServiceError) Error() string { + result := fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message) + + if se.Target != nil { + result += fmt.Sprintf(" Target=%q", *se.Target) + } + + if se.Details != nil { + d, err := json.Marshal(se.Details) + if err != nil { + result += fmt.Sprintf(" Details=%v", se.Details) + } + result += fmt.Sprintf(" Details=%v", string(d)) + } + + if se.InnerError != nil { + d, err := json.Marshal(se.InnerError) + if err != nil { + result += fmt.Sprintf(" InnerError=%v", se.InnerError) + } + result += fmt.Sprintf(" InnerError=%v", string(d)) + } + + if se.AdditionalInfo != nil { + d, err := json.Marshal(se.AdditionalInfo) + if err != nil { + result += fmt.Sprintf(" AdditionalInfo=%v", se.AdditionalInfo) + } + result += fmt.Sprintf(" AdditionalInfo=%v", string(d)) + } + + return result +} + +// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type. +func (se *ServiceError) UnmarshalJSON(b []byte) error { + // http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091 + + type serviceErrorInternal struct { + Code string `json:"code"` + Message string `json:"message"` + Target *string `json:"target,omitempty"` + AdditionalInfo []map[string]interface{} `json:"additionalInfo,omitempty"` + // not all services conform to the OData v4 spec. + // the following fields are where we've seen discrepancies + + // spec calls for []map[string]interface{} but have seen map[string]interface{} + Details interface{} `json:"details,omitempty"` + + // spec calls for map[string]interface{} but have seen []map[string]interface{} and string + InnerError interface{} `json:"innererror,omitempty"` + } + + sei := serviceErrorInternal{} + if err := json.Unmarshal(b, &sei); err != nil { + return err + } + + // copy the fields we know to be correct + se.AdditionalInfo = sei.AdditionalInfo + se.Code = sei.Code + se.Message = sei.Message + se.Target = sei.Target + + // converts an []interface{} to []map[string]interface{} + arrayOfObjs := func(v interface{}) ([]map[string]interface{}, bool) { + arrayOf, ok := v.([]interface{}) + if !ok { + return nil, false + } + final := []map[string]interface{}{} + for _, item := range arrayOf { + as, ok := item.(map[string]interface{}) + if !ok { + return nil, false + } + final = append(final, as) + } + return final, true + } + + // convert the remaining fields, falling back to raw JSON if necessary + + if c, ok := arrayOfObjs(sei.Details); ok { + se.Details = c + } else if c, ok := sei.Details.(map[string]interface{}); ok { + se.Details = []map[string]interface{}{c} + } else if sei.Details != nil { + // stuff into Details + se.Details = []map[string]interface{}{ + {"raw": sei.Details}, + } + } + + if c, ok := sei.InnerError.(map[string]interface{}); ok { + se.InnerError = c + } else if c, ok := arrayOfObjs(sei.InnerError); ok { + // if there's only one error extract it + if len(c) == 1 { + se.InnerError = c[0] + } else { + // multiple errors, stuff them into the value + se.InnerError = map[string]interface{}{ + "multi": c, + } + } + } else if c, ok := sei.InnerError.(string); ok { + se.InnerError = map[string]interface{}{"error": c} + } else if sei.InnerError != nil { + // stuff into 
InnerError + se.InnerError = map[string]interface{}{ + "raw": sei.InnerError, + } + } + return nil +} + +// RequestError describes an error response returned by Azure service. +type RequestError struct { + autorest.DetailedError + + // The error returned by the Azure service. + ServiceError *ServiceError `json:"error" xml:"Error"` + + // The request id (from the x-ms-request-id-header) of the request. + RequestID string +} + +// Error returns a human-friendly error message from service error. +func (e RequestError) Error() string { + return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v", + e.StatusCode, e.ServiceError) +} + +// IsAzureError returns true if the passed error is an Azure Service error; false otherwise. +func IsAzureError(e error) bool { + _, ok := e.(*RequestError) + return ok +} + +// Resource contains details about an Azure resource. +type Resource struct { + SubscriptionID string + ResourceGroup string + Provider string + ResourceType string + ResourceName string +} + +// String function returns a string in form of azureResourceID +func (r Resource) String() string { + return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/%s/%s/%s", r.SubscriptionID, r.ResourceGroup, r.Provider, r.ResourceType, r.ResourceName) +} + +// ParseResourceID parses a resource ID into a ResourceDetails struct. +// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-template-functions-resource#return-value-4. +func ParseResourceID(resourceID string) (Resource, error) { + + const resourceIDPatternText = `(?i)subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)` + resourceIDPattern := regexp.MustCompile(resourceIDPatternText) + match := resourceIDPattern.FindStringSubmatch(resourceID) + + if len(match) == 0 { + return Resource{}, fmt.Errorf("parsing failed for %s. Invalid resource Id format", resourceID) + } + + v := strings.Split(match[5], "/") + resourceName := v[len(v)-1] + + result := Resource{ + SubscriptionID: match[1], + ResourceGroup: match[2], + Provider: match[3], + ResourceType: match[4], + ResourceName: resourceName, + } + + return result, nil +} + +// NewErrorWithError creates a new Error conforming object from the +// passed packageType, method, statusCode of the given resp (UndefinedStatusCode +// if resp is nil), message, and original error. message is treated as a format +// string to which the optional args apply. +func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError { + if v, ok := original.(*RequestError); ok { + return *v + } + + statusCode := autorest.UndefinedStatusCode + if resp != nil { + statusCode = resp.StatusCode + } + return RequestError{ + DetailedError: autorest.DetailedError{ + Original: original, + PackageType: packageType, + Method: method, + StatusCode: statusCode, + Message: fmt.Sprintf(message, args...), + }, + } +} + +// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id +// header to true such that UUID accompanies the http.Response. 
+func WithReturningClientID(uuid string) autorest.PrepareDecorator { + preparer := autorest.CreatePreparer( + WithClientID(uuid), + WithReturnClientID(true)) + + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + return preparer.Prepare(r) + }) + } +} + +// WithClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). +func WithClientID(uuid string) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderClientID, uuid) +} + +// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-return-client-request-id whose boolean value indicates if the value of the +// x-ms-client-request-id header should be included in the http.Response. +func WithReturnClientID(b bool) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b)) +} + +// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the +// http.Request sent to the service (and returned in the http.Response) +func ExtractClientID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderClientID, resp) +} + +// ExtractRequestID extracts the Azure server generated request identifier from the +// x-ms-request-id header. +func ExtractRequestID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderRequestID, resp) +} + +// WithErrorUnlessStatusCode returns a RespondDecorator that emits an +// azure.RequestError by reading the response body unless the response HTTP status code +// is among the set passed. +// +// If there is a chance service may return responses other than the Azure error +// format and the response cannot be parsed into an error, a decoding error will +// be returned containing the response body. In any case, the Responder will +// return an error if the status code is not satisfied. +// +// If this Responder returns an error, the response body will be replaced with +// an in-memory reader, which needs no further closing. +func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator { + return func(r autorest.Responder) autorest.Responder { + return autorest.ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) { + var e RequestError + defer resp.Body.Close() + + encodedAs := autorest.EncodedAsJSON + if strings.Contains(resp.Header.Get("Content-Type"), "xml") { + encodedAs = autorest.EncodedAsXML + } + + // Copy and replace the Body in case it does not contain an error object. + // This will leave the Body available to the caller. 
+ b, decodeErr := autorest.CopyAndDecode(encodedAs, resp.Body, &e) + resp.Body = ioutil.NopCloser(&b) + if decodeErr != nil { + return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr) + } + if e.ServiceError == nil { + // Check if error is unwrapped ServiceError + decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes())) + if err := decoder.Decode(&e.ServiceError); err != nil { + return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), err) + } + + // for example, should the API return the literal value `null` as the response + if e.ServiceError == nil { + e.ServiceError = &ServiceError{ + Code: "Unknown", + Message: "Unknown service error", + Details: []map[string]interface{}{ + { + "HttpResponse.Body": b.String(), + }, + }, + } + } + } + + if e.ServiceError != nil && e.ServiceError.Message == "" { + // if we're here it means the returned error wasn't OData v4 compliant. + // try to unmarshal the body in hopes of getting something. + rawBody := map[string]interface{}{} + decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes())) + if err := decoder.Decode(&rawBody); err != nil { + return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), err) + } + + e.ServiceError = &ServiceError{ + Code: "Unknown", + Message: "Unknown service error", + } + if len(rawBody) > 0 { + e.ServiceError.Details = []map[string]interface{}{rawBody} + } + } + e.Response = resp + e.RequestID = ExtractRequestID(resp) + if e.StatusCode == nil { + e.StatusCode = resp.StatusCode + } + err = &e + } + return err + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go new file mode 100644 index 0000000000..9bbc0899e4 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -0,0 +1,269 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "strings" +) + +const ( + // EnvironmentFilepathName captures the name of the environment variable containing the path to the file + // to be used while populating the Azure Environment. + EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH" + + // NotAvailable is used for endpoints and resource IDs that are not available for a given cloud. + NotAvailable = "N/A" +) + +var environments = map[string]Environment{ + "AZURECHINACLOUD": ChinaCloud, + "AZUREGERMANCLOUD": GermanCloud, + "AZUREPUBLICCLOUD": PublicCloud, + "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, +} + +// ResourceIdentifier contains a set of Azure resource IDs. 
+type ResourceIdentifier struct { + Graph string `json:"graph"` + KeyVault string `json:"keyVault"` + Datalake string `json:"datalake"` + Batch string `json:"batch"` + OperationalInsights string `json:"operationalInsights"` + Storage string `json:"storage"` + Synapse string `json:"synapse"` + ServiceBus string `json:"serviceBus"` +} + +// Environment represents a set of endpoints for each of Azure's Clouds. +type Environment struct { + Name string `json:"name"` + ManagementPortalURL string `json:"managementPortalURL"` + PublishSettingsURL string `json:"publishSettingsURL"` + ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` + ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` + ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` + GalleryEndpoint string `json:"galleryEndpoint"` + KeyVaultEndpoint string `json:"keyVaultEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + ServiceBusEndpoint string `json:"serviceBusEndpoint"` + BatchManagementEndpoint string `json:"batchManagementEndpoint"` + StorageEndpointSuffix string `json:"storageEndpointSuffix"` + SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` + TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` + KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` + ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` + ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` + ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` + ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` + CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"` + TokenAudience string `json:"tokenAudience"` + APIManagementHostNameSuffix string `json:"apiManagementHostNameSuffix"` + SynapseEndpointSuffix string `json:"synapseEndpointSuffix"` + ResourceIdentifiers ResourceIdentifier `json:"resourceIdentifiers"` +} + +var ( + // PublicCloud is the default public Azure cloud environment + PublicCloud = Environment{ + Name: "AzurePublicCloud", + ManagementPortalURL: "https://manage.windowsazure.com/", + PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.windows.net/", + ResourceManagerEndpoint: "https://management.azure.com/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + GalleryEndpoint: "https://gallery.azure.com/", + KeyVaultEndpoint: "https://vault.azure.net/", + GraphEndpoint: "https://graph.windows.net/", + ServiceBusEndpoint: "https://servicebus.windows.net/", + BatchManagementEndpoint: "https://batch.core.windows.net/", + StorageEndpointSuffix: "core.windows.net", + SQLDatabaseDNSSuffix: "database.windows.net", + TrafficManagerDNSSuffix: "trafficmanager.net", + KeyVaultDNSSuffix: "vault.azure.net", + ServiceBusEndpointSuffix: "servicebus.windows.net", + ServiceManagementVMDNSSuffix: "cloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.azure.com", + ContainerRegistryDNSSuffix: "azurecr.io", + CosmosDBDNSSuffix: "documents.azure.com", + TokenAudience: "https://management.azure.com/", + APIManagementHostNameSuffix: "azure-api.net", + SynapseEndpointSuffix: "dev.azuresynapse.net", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.windows.net/", + KeyVault: "https://vault.azure.net", + Datalake: "https://datalake.azure.net/", + Batch: "https://batch.core.windows.net/", + OperationalInsights: "https://api.loganalytics.io", + Storage: "https://storage.azure.com/", + Synapse: "https://dev.azuresynapse.net", + ServiceBus: 
"https://servicebus.azure.net/", + }, + } + + // USGovernmentCloud is the cloud environment for the US Government + USGovernmentCloud = Environment{ + Name: "AzureUSGovernmentCloud", + ManagementPortalURL: "https://manage.windowsazure.us/", + PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", + ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.us/", + GalleryEndpoint: "https://gallery.usgovcloudapi.net/", + KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", + GraphEndpoint: "https://graph.windows.net/", + ServiceBusEndpoint: "https://servicebus.usgovcloudapi.net/", + BatchManagementEndpoint: "https://batch.core.usgovcloudapi.net/", + StorageEndpointSuffix: "core.usgovcloudapi.net", + SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", + TrafficManagerDNSSuffix: "usgovtrafficmanager.net", + KeyVaultDNSSuffix: "vault.usgovcloudapi.net", + ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", + ServiceManagementVMDNSSuffix: "usgovcloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.usgovcloudapi.net", + ContainerRegistryDNSSuffix: "azurecr.us", + CosmosDBDNSSuffix: "documents.azure.us", + TokenAudience: "https://management.usgovcloudapi.net/", + APIManagementHostNameSuffix: "azure-api.us", + SynapseEndpointSuffix: NotAvailable, + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.windows.net/", + KeyVault: "https://vault.usgovcloudapi.net", + Datalake: NotAvailable, + Batch: "https://batch.core.usgovcloudapi.net/", + OperationalInsights: "https://api.loganalytics.us", + Storage: "https://storage.azure.com/", + Synapse: NotAvailable, + ServiceBus: "https://servicebus.azure.net/", + }, + } + + // ChinaCloud is the cloud environment operated in China + ChinaCloud = Environment{ + Name: "AzureChinaCloud", + ManagementPortalURL: "https://manage.chinacloudapi.com/", + PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", + ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", + ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/", + GalleryEndpoint: "https://gallery.chinacloudapi.cn/", + KeyVaultEndpoint: "https://vault.azure.cn/", + GraphEndpoint: "https://graph.chinacloudapi.cn/", + ServiceBusEndpoint: "https://servicebus.chinacloudapi.cn/", + BatchManagementEndpoint: "https://batch.chinacloudapi.cn/", + StorageEndpointSuffix: "core.chinacloudapi.cn", + SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", + TrafficManagerDNSSuffix: "trafficmanager.cn", + KeyVaultDNSSuffix: "vault.azure.cn", + ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn", + ServiceManagementVMDNSSuffix: "chinacloudapp.cn", + ResourceManagerVMDNSSuffix: "cloudapp.chinacloudapi.cn", + ContainerRegistryDNSSuffix: "azurecr.cn", + CosmosDBDNSSuffix: "documents.azure.cn", + TokenAudience: "https://management.chinacloudapi.cn/", + APIManagementHostNameSuffix: "azure-api.cn", + SynapseEndpointSuffix: "dev.azuresynapse.azure.cn", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.chinacloudapi.cn/", + KeyVault: "https://vault.azure.cn", + Datalake: NotAvailable, + Batch: "https://batch.chinacloudapi.cn/", + OperationalInsights: NotAvailable, + Storage: "https://storage.azure.com/", + Synapse: "https://dev.azuresynapse.net", + ServiceBus: "https://servicebus.azure.net/", + }, + } + + // GermanCloud is the 
cloud environment operated in Germany + GermanCloud = Environment{ + Name: "AzureGermanCloud", + ManagementPortalURL: "http://portal.microsoftazure.de/", + PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.cloudapi.de/", + ResourceManagerEndpoint: "https://management.microsoftazure.de/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", + GalleryEndpoint: "https://gallery.cloudapi.de/", + KeyVaultEndpoint: "https://vault.microsoftazure.de/", + GraphEndpoint: "https://graph.cloudapi.de/", + ServiceBusEndpoint: "https://servicebus.cloudapi.de/", + BatchManagementEndpoint: "https://batch.cloudapi.de/", + StorageEndpointSuffix: "core.cloudapi.de", + SQLDatabaseDNSSuffix: "database.cloudapi.de", + TrafficManagerDNSSuffix: "azuretrafficmanager.de", + KeyVaultDNSSuffix: "vault.microsoftazure.de", + ServiceBusEndpointSuffix: "servicebus.cloudapi.de", + ServiceManagementVMDNSSuffix: "azurecloudapp.de", + ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", + ContainerRegistryDNSSuffix: NotAvailable, + CosmosDBDNSSuffix: "documents.microsoftazure.de", + TokenAudience: "https://management.microsoftazure.de/", + APIManagementHostNameSuffix: NotAvailable, + SynapseEndpointSuffix: NotAvailable, + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.cloudapi.de/", + KeyVault: "https://vault.microsoftazure.de", + Datalake: NotAvailable, + Batch: "https://batch.cloudapi.de/", + OperationalInsights: NotAvailable, + Storage: "https://storage.azure.com/", + Synapse: NotAvailable, + ServiceBus: "https://servicebus.azure.net/", + }, + } +) + +// EnvironmentFromName returns an Environment based on the common name specified. +func EnvironmentFromName(name string) (Environment, error) { + // IMPORTANT + // As per @radhikagupta5: + // This is technical debt, fundamentally here because Kubernetes is not currently accepting + // contributions to the providers. Once that is an option, the provider should be updated to + // directly call `EnvironmentFromFile`. Until then, we rely on dispatching Azure Stack environment creation + // from this method based on the name that is provided to us. + if strings.EqualFold(name, "AZURESTACKCLOUD") { + return EnvironmentFromFile(os.Getenv(EnvironmentFilepathName)) + } + + name = strings.ToUpper(name) + env, ok := environments[name] + if !ok { + return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) + } + + return env, nil +} + +// EnvironmentFromFile loads an Environment from a configuration file available on disk. +// This function is particularly useful in the Hybrid Cloud model, where one must define their own +// endpoints. +func EnvironmentFromFile(location string) (unmarshaled Environment, err error) { + fileContents, err := ioutil.ReadFile(location) + if err != nil { + return + } + + err = json.Unmarshal(fileContents, &unmarshaled) + + return +} + +// SetEnvironment updates the environment map with the specified values. 
+func SetEnvironment(name string, env Environment) { + environments[strings.ToUpper(name)] = env +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go new file mode 100644 index 0000000000..507f9e95cf --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go @@ -0,0 +1,245 @@ +package azure + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" +) + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +type audience []string + +type authentication struct { + LoginEndpoint string `json:"loginEndpoint"` + Audiences audience `json:"audiences"` +} + +type environmentMetadataInfo struct { + GalleryEndpoint string `json:"galleryEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + PortalEndpoint string `json:"portalEndpoint"` + Authentication authentication `json:"authentication"` +} + +// EnvironmentProperty represent property names that clients can override +type EnvironmentProperty string + +const ( + // EnvironmentName ... + EnvironmentName EnvironmentProperty = "name" + // EnvironmentManagementPortalURL .. + EnvironmentManagementPortalURL EnvironmentProperty = "managementPortalURL" + // EnvironmentPublishSettingsURL ... + EnvironmentPublishSettingsURL EnvironmentProperty = "publishSettingsURL" + // EnvironmentServiceManagementEndpoint ... + EnvironmentServiceManagementEndpoint EnvironmentProperty = "serviceManagementEndpoint" + // EnvironmentResourceManagerEndpoint ... + EnvironmentResourceManagerEndpoint EnvironmentProperty = "resourceManagerEndpoint" + // EnvironmentActiveDirectoryEndpoint ... + EnvironmentActiveDirectoryEndpoint EnvironmentProperty = "activeDirectoryEndpoint" + // EnvironmentGalleryEndpoint ... + EnvironmentGalleryEndpoint EnvironmentProperty = "galleryEndpoint" + // EnvironmentKeyVaultEndpoint ... + EnvironmentKeyVaultEndpoint EnvironmentProperty = "keyVaultEndpoint" + // EnvironmentGraphEndpoint ... + EnvironmentGraphEndpoint EnvironmentProperty = "graphEndpoint" + // EnvironmentServiceBusEndpoint ... + EnvironmentServiceBusEndpoint EnvironmentProperty = "serviceBusEndpoint" + // EnvironmentBatchManagementEndpoint ... + EnvironmentBatchManagementEndpoint EnvironmentProperty = "batchManagementEndpoint" + // EnvironmentStorageEndpointSuffix ... + EnvironmentStorageEndpointSuffix EnvironmentProperty = "storageEndpointSuffix" + // EnvironmentSQLDatabaseDNSSuffix ... + EnvironmentSQLDatabaseDNSSuffix EnvironmentProperty = "sqlDatabaseDNSSuffix" + // EnvironmentTrafficManagerDNSSuffix ... + EnvironmentTrafficManagerDNSSuffix EnvironmentProperty = "trafficManagerDNSSuffix" + // EnvironmentKeyVaultDNSSuffix ... + EnvironmentKeyVaultDNSSuffix EnvironmentProperty = "keyVaultDNSSuffix" + // EnvironmentServiceBusEndpointSuffix ... 
+ EnvironmentServiceBusEndpointSuffix EnvironmentProperty = "serviceBusEndpointSuffix" + // EnvironmentServiceManagementVMDNSSuffix ... + EnvironmentServiceManagementVMDNSSuffix EnvironmentProperty = "serviceManagementVMDNSSuffix" + // EnvironmentResourceManagerVMDNSSuffix ... + EnvironmentResourceManagerVMDNSSuffix EnvironmentProperty = "resourceManagerVMDNSSuffix" + // EnvironmentContainerRegistryDNSSuffix ... + EnvironmentContainerRegistryDNSSuffix EnvironmentProperty = "containerRegistryDNSSuffix" + // EnvironmentTokenAudience ... + EnvironmentTokenAudience EnvironmentProperty = "tokenAudience" +) + +// OverrideProperty represents property name and value that clients can override +type OverrideProperty struct { + Key EnvironmentProperty + Value string +} + +// EnvironmentFromURL loads an Environment from a URL +// This function is particularly useful in the Hybrid Cloud model, where one may define their own +// endpoints. +func EnvironmentFromURL(resourceManagerEndpoint string, properties ...OverrideProperty) (environment Environment, err error) { + var metadataEnvProperties environmentMetadataInfo + + if resourceManagerEndpoint == "" { + return environment, fmt.Errorf("Metadata resource manager endpoint is empty") + } + + if metadataEnvProperties, err = retrieveMetadataEnvironment(resourceManagerEndpoint); err != nil { + return environment, err + } + + // Give priority to user's override values + overrideProperties(&environment, properties) + + if environment.Name == "" { + environment.Name = "HybridEnvironment" + } + stampDNSSuffix := environment.StorageEndpointSuffix + if stampDNSSuffix == "" { + stampDNSSuffix = strings.TrimSuffix(strings.TrimPrefix(strings.Replace(resourceManagerEndpoint, strings.Split(resourceManagerEndpoint, ".")[0], "", 1), "."), "/") + environment.StorageEndpointSuffix = stampDNSSuffix + } + if environment.KeyVaultDNSSuffix == "" { + environment.KeyVaultDNSSuffix = fmt.Sprintf("%s.%s", "vault", stampDNSSuffix) + } + if environment.KeyVaultEndpoint == "" { + environment.KeyVaultEndpoint = fmt.Sprintf("%s%s", "https://", environment.KeyVaultDNSSuffix) + } + if environment.TokenAudience == "" { + environment.TokenAudience = metadataEnvProperties.Authentication.Audiences[0] + } + if environment.ActiveDirectoryEndpoint == "" { + environment.ActiveDirectoryEndpoint = metadataEnvProperties.Authentication.LoginEndpoint + } + if environment.ResourceManagerEndpoint == "" { + environment.ResourceManagerEndpoint = resourceManagerEndpoint + } + if environment.GalleryEndpoint == "" { + environment.GalleryEndpoint = metadataEnvProperties.GalleryEndpoint + } + if environment.GraphEndpoint == "" { + environment.GraphEndpoint = metadataEnvProperties.GraphEndpoint + } + + return environment, nil +} + +func overrideProperties(environment *Environment, properties []OverrideProperty) { + for _, property := range properties { + switch property.Key { + case EnvironmentName: + { + environment.Name = property.Value + } + case EnvironmentManagementPortalURL: + { + environment.ManagementPortalURL = property.Value + } + case EnvironmentPublishSettingsURL: + { + environment.PublishSettingsURL = property.Value + } + case EnvironmentServiceManagementEndpoint: + { + environment.ServiceManagementEndpoint = property.Value + } + case EnvironmentResourceManagerEndpoint: + { + environment.ResourceManagerEndpoint = property.Value + } + case EnvironmentActiveDirectoryEndpoint: + { + environment.ActiveDirectoryEndpoint = property.Value + } + case EnvironmentGalleryEndpoint: + { + 
environment.GalleryEndpoint = property.Value + } + case EnvironmentKeyVaultEndpoint: + { + environment.KeyVaultEndpoint = property.Value + } + case EnvironmentGraphEndpoint: + { + environment.GraphEndpoint = property.Value + } + case EnvironmentServiceBusEndpoint: + { + environment.ServiceBusEndpoint = property.Value + } + case EnvironmentBatchManagementEndpoint: + { + environment.BatchManagementEndpoint = property.Value + } + case EnvironmentStorageEndpointSuffix: + { + environment.StorageEndpointSuffix = property.Value + } + case EnvironmentSQLDatabaseDNSSuffix: + { + environment.SQLDatabaseDNSSuffix = property.Value + } + case EnvironmentTrafficManagerDNSSuffix: + { + environment.TrafficManagerDNSSuffix = property.Value + } + case EnvironmentKeyVaultDNSSuffix: + { + environment.KeyVaultDNSSuffix = property.Value + } + case EnvironmentServiceBusEndpointSuffix: + { + environment.ServiceBusEndpointSuffix = property.Value + } + case EnvironmentServiceManagementVMDNSSuffix: + { + environment.ServiceManagementVMDNSSuffix = property.Value + } + case EnvironmentResourceManagerVMDNSSuffix: + { + environment.ResourceManagerVMDNSSuffix = property.Value + } + case EnvironmentContainerRegistryDNSSuffix: + { + environment.ContainerRegistryDNSSuffix = property.Value + } + case EnvironmentTokenAudience: + { + environment.TokenAudience = property.Value + } + } + } +} + +func retrieveMetadataEnvironment(endpoint string) (environment environmentMetadataInfo, err error) { + client := autorest.NewClientWithUserAgent("") + managementEndpoint := fmt.Sprintf("%s%s", strings.TrimSuffix(endpoint, "/"), "/metadata/endpoints?api-version=1.0") + req, _ := http.NewRequest("GET", managementEndpoint, nil) + response, err := client.Do(req) + if err != nil { + return environment, err + } + defer response.Body.Close() + jsonResponse, err := ioutil.ReadAll(response.Body) + if err != nil { + return environment, err + } + err = json.Unmarshal(jsonResponse, &environment) + return environment, err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go new file mode 100644 index 0000000000..c6d39f6866 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go @@ -0,0 +1,204 @@ +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package azure + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" +) + +// DoRetryWithRegistration tries to register the resource provider in case it is unregistered. 
+// It also handles request retries +func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := autorest.NewRetriableRequest(r) + for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + + resp, err = autorest.SendWithSender(s, rr.Request(), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), + ) + if err != nil { + return resp, err + } + + if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration { + return resp, err + } + + var re RequestError + if strings.Contains(r.Header.Get("Content-Type"), "xml") { + // XML errors (e.g. Storage Data Plane) only return the inner object + err = autorest.Respond(resp, autorest.ByUnmarshallingXML(&re.ServiceError)) + } else { + err = autorest.Respond(resp, autorest.ByUnmarshallingJSON(&re)) + } + + if err != nil { + return resp, err + } + err = re + + if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" { + regErr := register(client, r, re) + if regErr != nil { + return resp, fmt.Errorf("failed auto registering Resource Provider: %s. Original error: %s", regErr, err) + } + } + } + return resp, err + }) + } +} + +func getProvider(re RequestError) (string, error) { + if re.ServiceError != nil && len(re.ServiceError.Details) > 0 { + return re.ServiceError.Details[0]["target"].(string), nil + } + return "", errors.New("provider was not found in the response") +} + +func register(client autorest.Client, originalReq *http.Request, re RequestError) error { + subID := getSubscription(originalReq.URL.Path) + if subID == "" { + return errors.New("missing parameter subscriptionID to register resource provider") + } + providerName, err := getProvider(re) + if err != nil { + return fmt.Errorf("missing parameter provider to register resource provider: %s", err) + } + newURL := url.URL{ + Scheme: originalReq.URL.Scheme, + Host: originalReq.URL.Host, + } + + // taken from the resources SDK + // with almost identical code, this sections are easier to mantain + // It is also not a good idea to import the SDK here + // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252 + pathParameters := map[string]interface{}{ + "resourceProviderNamespace": autorest.Encode("path", providerName), + "subscriptionId": autorest.Encode("path", subID), + } + + const APIVersion = "2016-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(newURL.String()), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters), + autorest.WithQueryParameters(queryParameters), + ) + + req, err := preparer.Prepare(&http.Request{}) + if err != nil { + return err + } + req = req.WithContext(originalReq.Context()) + + resp, err := autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), + ) + if err != nil { + return err + } + + type Provider struct { + RegistrationState *string `json:"registrationState,omitempty"` + } + var provider Provider + + err = autorest.Respond( + resp, + 
WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&provider), + autorest.ByClosing(), + ) + if err != nil { + return err + } + + // poll for registered provisioning state + registrationStartTime := time.Now() + for err == nil && (client.PollingDuration == 0 || (client.PollingDuration != 0 && time.Since(registrationStartTime) < client.PollingDuration)) { + // taken from the resources SDK + // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45 + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(newURL.String()), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters), + autorest.WithQueryParameters(queryParameters), + ) + req, err = preparer.Prepare(&http.Request{}) + if err != nil { + return err + } + req = req.WithContext(originalReq.Context()) + + resp, err := autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), + ) + if err != nil { + return err + } + + err = autorest.Respond( + resp, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&provider), + autorest.ByClosing(), + ) + if err != nil { + return err + } + + if provider.RegistrationState != nil && + *provider.RegistrationState == "Registered" { + break + } + + delayed := autorest.DelayWithRetryAfter(resp, originalReq.Context().Done()) + if !delayed && !autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Context().Done()) { + return originalReq.Context().Err() + } + } + if client.PollingDuration != 0 && !(time.Since(registrationStartTime) < client.PollingDuration) { + return errors.New("polling for resource provider registration has exceeded the polling duration") + } + return err +} + +func getSubscription(path string) string { + parts := strings.Split(path, "/") + for i, v := range parts { + if v == "subscriptions" && (i+1) < len(parts) { + return parts[i+1] + } + } + return "" +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go new file mode 100644 index 0000000000..898db8b954 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/client.go @@ -0,0 +1,324 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "crypto/tls" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "strings" + "time" + + "github.com/Azure/go-autorest/logger" +) + +const ( + // DefaultPollingDelay is a reasonable delay between polling requests. + DefaultPollingDelay = 60 * time.Second + + // DefaultPollingDuration is a reasonable total polling duration. + DefaultPollingDuration = 15 * time.Minute + + // DefaultRetryAttempts is number of attempts for retry status codes (5xx). + DefaultRetryAttempts = 3 + + // DefaultRetryDuration is the duration to wait between retries. 
+ DefaultRetryDuration = 30 * time.Second +) + +var ( + // StatusCodesForRetry are a defined group of status code for which the client will retry + StatusCodesForRetry = []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } +) + +const ( + requestFormat = `HTTP Request Begin =================================================== +%s +===================================================== HTTP Request End +` + responseFormat = `HTTP Response Begin =================================================== +%s +===================================================== HTTP Response End +` +) + +// Response serves as the base for all responses from generated clients. It provides access to the +// last http.Response. +type Response struct { + *http.Response `json:"-"` +} + +// IsHTTPStatus returns true if the returned HTTP status code matches the provided status code. +// If there was no response (i.e. the underlying http.Response is nil) the return value is false. +func (r Response) IsHTTPStatus(statusCode int) bool { + if r.Response == nil { + return false + } + return r.Response.StatusCode == statusCode +} + +// HasHTTPStatus returns true if the returned HTTP status code matches one of the provided status codes. +// If there was no response (i.e. the underlying http.Response is nil) or not status codes are provided +// the return value is false. +func (r Response) HasHTTPStatus(statusCodes ...int) bool { + return ResponseHasStatusCode(r.Response, statusCodes...) +} + +// LoggingInspector implements request and response inspectors that log the full request and +// response to a supplied log. +type LoggingInspector struct { + Logger *log.Logger +} + +// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The +// body is restored after being emitted. +// +// Note: Since it reads the entire Body, this decorator should not be used where body streaming is +// important. It is best used to trace JSON or similar body values. +func (li LoggingInspector) WithInspection() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + var body, b bytes.Buffer + + defer r.Body.Close() + + r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body)) + if err := r.Write(&b); err != nil { + return nil, fmt.Errorf("Failed to write response: %v", err) + } + + li.Logger.Printf(requestFormat, b.String()) + + r.Body = ioutil.NopCloser(&body) + return p.Prepare(r) + }) + } +} + +// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The +// body is restored after being emitted. +// +// Note: Since it reads the entire Body, this decorator should not be used where body streaming is +// important. It is best used to trace JSON or similar body values. +func (li LoggingInspector) ByInspecting() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + var body, b bytes.Buffer + defer resp.Body.Close() + resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body)) + if err := resp.Write(&b); err != nil { + return fmt.Errorf("Failed to write response: %v", err) + } + + li.Logger.Printf(responseFormat, b.String()) + + resp.Body = ioutil.NopCloser(&body) + return r.Respond(resp) + }) + } +} + +// Client is the base for autorest generated clients. 
It provides default, "do nothing" +// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the +// standard, undecorated http.Client as a default Sender. +// +// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and +// return responses that compose with Response. +// +// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom +// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit +// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence +// sending the request by providing a decorated Sender. +type Client struct { + Authorizer Authorizer + Sender Sender + RequestInspector PrepareDecorator + ResponseInspector RespondDecorator + + // PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header + PollingDelay time.Duration + + // PollingDuration sets the maximum polling time after which an error is returned. + // Setting this to zero will use the provided context to control the duration. + PollingDuration time.Duration + + // RetryAttempts sets the total number of times the client will attempt to make an HTTP request. + // Set the value to 1 to disable retries. DO NOT set the value to less than 1. + RetryAttempts int + + // RetryDuration sets the delay duration for retries. + RetryDuration time.Duration + + // UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent + // through the Do method. + UserAgent string + + Jar http.CookieJar + + // Set to true to skip attempted registration of resource providers (false by default). + SkipResourceProviderRegistration bool + + // SendDecorators can be used to override the default chain of SendDecorators. + // This can be used to specify things like a custom retry SendDecorator. + // Set this to an empty slice to use no SendDecorators. + SendDecorators []SendDecorator +} + +// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed +// string. +func NewClientWithUserAgent(ua string) Client { + return newClient(ua, tls.RenegotiateNever) +} + +// ClientOptions contains various Client configuration options. +type ClientOptions struct { + // UserAgent is an optional user-agent string to append to the default user agent. + UserAgent string + + // Renegotiation is an optional setting to control client-side TLS renegotiation. + Renegotiation tls.RenegotiationSupport +} + +// NewClientWithOptions returns an instance of a Client with the specified values. +func NewClientWithOptions(options ClientOptions) Client { + return newClient(options.UserAgent, options.Renegotiation) +} + +func newClient(ua string, renegotiation tls.RenegotiationSupport) Client { + c := Client{ + PollingDelay: DefaultPollingDelay, + PollingDuration: DefaultPollingDuration, + RetryAttempts: DefaultRetryAttempts, + RetryDuration: DefaultRetryDuration, + UserAgent: UserAgent(), + } + c.Sender = c.sender(renegotiation) + c.AddToUserAgent(ua) + return c +} + +// AddToUserAgent adds an extension to the current user agent +func (c *Client) AddToUserAgent(extension string) error { + if extension != "" { + c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension) + return nil + } + return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent) +} + +// Do implements the Sender interface by invoking the active Sender after applying authorization. 
+// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent +// is set, apply set the User-Agent header. +func (c Client) Do(r *http.Request) (*http.Response, error) { + if r.UserAgent() == "" { + r, _ = Prepare(r, + WithUserAgent(c.UserAgent)) + } + // NOTE: c.WithInspection() must be last in the list so that it can inspect all preceding operations + r, err := Prepare(r, + c.WithAuthorization(), + c.WithInspection()) + if err != nil { + var resp *http.Response + if detErr, ok := err.(DetailedError); ok { + // if the authorization failed (e.g. invalid credentials) there will + // be a response associated with the error, be sure to return it. + resp = detErr.Response + } + return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed") + } + logger.Instance.WriteRequest(r, logger.Filter{ + Header: func(k string, v []string) (bool, []string) { + // remove the auth token from the log + if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "Ocp-Apim-Subscription-Key") { + v = []string{"**REDACTED**"} + } + return true, v + }, + }) + resp, err := SendWithSender(c.sender(tls.RenegotiateNever), r) + logger.Instance.WriteResponse(resp, logger.Filter{}) + Respond(resp, c.ByInspecting()) + return resp, err +} + +// sender returns the Sender to which to send requests. +func (c Client) sender(renengotiation tls.RenegotiationSupport) Sender { + if c.Sender == nil { + return sender(renengotiation) + } + return c.Sender +} + +// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator +// from the current Authorizer. If not Authorizer is set, it uses the NullAuthorizer. +func (c Client) WithAuthorization() PrepareDecorator { + return c.authorizer().WithAuthorization() +} + +// authorizer returns the Authorizer to use. +func (c Client) authorizer() Authorizer { + if c.Authorizer == nil { + return NullAuthorizer{} + } + return c.Authorizer +} + +// WithInspection is a convenience method that passes the request to the supplied RequestInspector, +// if present, or returns the WithNothing PrepareDecorator otherwise. +func (c Client) WithInspection() PrepareDecorator { + if c.RequestInspector == nil { + return WithNothing() + } + return c.RequestInspector +} + +// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector, +// if present, or returns the ByIgnoring RespondDecorator otherwise. +func (c Client) ByInspecting() RespondDecorator { + if c.ResponseInspector == nil { + return ByIgnoring() + } + return c.ResponseInspector +} + +// Send sends the provided http.Request using the client's Sender or the default sender. +// It returns the http.Response and possible error. It also accepts a, possibly empty, +// default set of SendDecorators used when sending the request. +// SendDecorators have the following precedence: +// 1. In a request's context via WithSendDecorators() +// 2. Specified on the client in SendDecorators +// 3. The default values specified in this method +func (c Client) Send(req *http.Request, decorators ...SendDecorator) (*http.Response, error) { + if c.SendDecorators != nil { + decorators = c.SendDecorators + } + inCtx := req.Context().Value(ctxSendDecorators{}) + if sd, ok := inCtx.([]SendDecorator); ok { + decorators = sd + } + return SendWithSender(c, req, decorators...) 
+} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE new file mode 100644 index 0000000000..b9d6a27ea9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. 
Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
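A minimal, illustrative sketch of how a caller can exercise the Client and Response types from client.go above; the httptest server, user-agent string, and response body are stand-ins for a real endpoint, not part of the vendored code:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// A throwaway test server stands in for a real API endpoint.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`{"status":"ok"}`))
	}))
	defer srv.Close()

	// NewClientWithUserAgent wires up the Default* polling/retry values and
	// the package's default http.Client-backed Sender.
	c := autorest.NewClientWithUserAgent("kn-example")

	// Prepare a bare GET against the test server.
	req, err := autorest.Prepare(&http.Request{},
		autorest.AsGet(),
		autorest.WithBaseURL(srv.URL))
	if err != nil {
		panic(err)
	}

	// Client.Do applies the User-Agent, authorization, and inspectors before
	// handing the request to the configured Sender.
	resp, err := c.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	ar := autorest.Response{Response: resp}
	fmt.Println("ok:", ar.IsHTTPStatus(http.StatusOK))
	fmt.Println("retryable per StatusCodesForRetry:",
		autorest.ResponseHasStatusCode(resp, autorest.StatusCodesForRetry...))
}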
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/Azure/go-autorest/autorest/date/date.go new file mode 100644 index 0000000000..c457106568 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/date.go @@ -0,0 +1,96 @@ +/* +Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/) +defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of +time.Time types. And both convert to time.Time through a ToTime method. +*/ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "time" +) + +const ( + fullDate = "2006-01-02" + fullDateJSON = `"2006-01-02"` + dateFormat = "%04d-%02d-%02d" + jsonFormat = `"%04d-%02d-%02d"` +) + +// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e., +// 2006-01-02). +type Date struct { + time.Time +} + +// ParseDate create a new Date from the passed string. +func ParseDate(date string) (d Date, err error) { + return parseDate(date, fullDate) +} + +func parseDate(date string, format string) (Date, error) { + d, err := time.Parse(format, date) + return Date{Time: d}, err +} + +// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalBinary() ([]byte, error) { + return d.MarshalText() +} + +// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalBinary(data []byte) error { + return d.UnmarshalText(data) +} + +// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalJSON() (json []byte, err error) { + return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalJSON(data []byte) (err error) { + d.Time, err = time.Parse(fullDateJSON, string(data)) + return err +} + +// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalText() (text []byte, err error) { + return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalText(data []byte) (err error) { + d.Time, err = time.Parse(fullDate, string(data)) + return err +} + +// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02). 
+func (d Date) String() string { + return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) +} + +// ToTime returns a Date as a time.Time +func (d Date) ToTime() time.Time { + return d.Time +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go.mod b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod new file mode 100644 index 0000000000..f88ecc4022 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod @@ -0,0 +1,5 @@ +module github.com/Azure/go-autorest/autorest/date + +go 1.12 + +require github.com/Azure/go-autorest v14.2.0+incompatible diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go.sum b/vendor/github.com/Azure/go-autorest/autorest/date/go.sum new file mode 100644 index 0000000000..1fc56a962e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go.sum @@ -0,0 +1,2 @@ +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go new file mode 100644 index 0000000000..4e05432071 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/Azure/go-autorest/autorest/date/time.go new file mode 100644 index 0000000000..b453fad049 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/time.go @@ -0,0 +1,103 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "regexp" + "time" +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. 
+const ( + azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"` + azureUtcFormat = "2006-01-02T15:04:05.999999999" + rfc3339JSON = `"` + time.RFC3339Nano + `"` + rfc3339 = time.RFC3339Nano + tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$` +) + +// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +type Time struct { + time.Time +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalBinary() ([]byte, error) { + return t.Time.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalJSON() (json []byte, err error) { + return t.Time.MarshalJSON() +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalJSON(data []byte) (err error) { + timeFormat := azureUtcFormatJSON + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339JSON + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalText() (text []byte, err error) { + return t.Time.MarshalText() +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalText(data []byte) (err error) { + timeFormat := azureUtcFormat + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339 + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// String returns the Time formatted as an RFC3339 date-time string (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) String() string { + // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. + b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} + +// ToTime returns a Time as a time.Time +func (t Time) ToTime() time.Time { + return t.Time +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go new file mode 100644 index 0000000000..48fb39ba9b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go @@ -0,0 +1,100 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "errors" + "time" +) + +const ( + rfc1123JSON = `"` + time.RFC1123 + `"` + rfc1123 = time.RFC1123 +) + +// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +type TimeRFC1123 struct { + time.Time +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123JSON, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalJSON() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") + } + b := []byte(t.Format(rfc1123JSON)) + return b, nil +} + +// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalText() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") + } + + b := []byte(t.Format(rfc1123)) + return b, nil +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalBinary() ([]byte, error) { + return t.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// ToTime returns a Time as a time.Time +func (t TimeRFC1123) ToTime() time.Time { + return t.Time +} + +// String returns the Time formatted as an RFC1123 date-time string (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) String() string { + // Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does. + b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go new file mode 100644 index 0000000000..7073959b2a --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go @@ -0,0 +1,123 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
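A short, hedged sketch of how the date.Date, date.Time, and date.TimeRFC1123 types defined above behave when marshalled to and from JSON; the timestamps are arbitrary examples:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/date"
)

func main() {
	// Date handles the Swagger "date" format (RFC3339 full-date).
	d, err := date.ParseDate("2022-02-23")
	if err != nil {
		panic(err)
	}
	b, _ := json.Marshal(d)
	fmt.Println(string(b)) // "2022-02-23"

	// Time accepts RFC3339 date-times, with or without the 'Z' suffix that
	// some Azure services omit.
	var t date.Time
	if err := json.Unmarshal([]byte(`"2022-02-23T15:43:18"`), &t); err != nil {
		panic(err)
	}
	fmt.Println(t.ToTime().Unix())

	// TimeRFC1123 covers header-style timestamps.
	var r date.TimeRFC1123
	if err := json.Unmarshal([]byte(`"Wed, 23 Feb 2022 15:43:18 GMT"`), &r); err != nil {
		panic(err)
	}
	fmt.Println(r.String())
}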
+ +import ( + "bytes" + "encoding/binary" + "encoding/json" + "time" +) + +// unixEpoch is the moment in time that should be treated as timestamp 0. +var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC) + +// UnixTime marshals and unmarshals a time that is represented as the number +// of seconds (ignoring skip-seconds) since the Unix Epoch. +type UnixTime time.Time + +// Duration returns the time as a Duration since the UnixEpoch. +func (t UnixTime) Duration() time.Duration { + return time.Time(t).Sub(unixEpoch) +} + +// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch. +func NewUnixTimeFromSeconds(seconds float64) UnixTime { + return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second))) +} + +// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch. +func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime { + return NewUnixTimeFromDuration(time.Duration(nanoseconds)) +} + +// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch. +func NewUnixTimeFromDuration(dur time.Duration) UnixTime { + return UnixTime(unixEpoch.Add(dur)) +} + +// UnixEpoch retreives the moment considered the Unix Epoch. I.e. The time represented by '0' +func UnixEpoch() time.Time { + return unixEpoch +} + +// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements. +// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.) +func (t UnixTime) MarshalJSON() ([]byte, error) { + buffer := &bytes.Buffer{} + enc := json.NewEncoder(buffer) + err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9) + if err != nil { + return nil, err + } + return buffer.Bytes(), nil +} + +// UnmarshalJSON reconstitures a UnixTime saved as a JSON number of the number of seconds since +// midnight January 1st, 1970. +func (t *UnixTime) UnmarshalJSON(text []byte) error { + dec := json.NewDecoder(bytes.NewReader(text)) + + var secondsSinceEpoch float64 + if err := dec.Decode(&secondsSinceEpoch); err != nil { + return err + } + + *t = NewUnixTimeFromSeconds(secondsSinceEpoch) + + return nil +} + +// MarshalText stores the number of seconds since the Unix Epoch as a textual floating point number. +func (t UnixTime) MarshalText() ([]byte, error) { + cast := time.Time(t) + return cast.MarshalText() +} + +// UnmarshalText populates a UnixTime with a value stored textually as a floating point number of seconds since the Unix Epoch. +func (t *UnixTime) UnmarshalText(raw []byte) error { + var unmarshaled time.Time + + if err := unmarshaled.UnmarshalText(raw); err != nil { + return err + } + + *t = UnixTime(unmarshaled) + return nil +} + +// MarshalBinary converts a UnixTime into a binary.LittleEndian float64 of nanoseconds since the epoch. +func (t UnixTime) MarshalBinary() ([]byte, error) { + buf := &bytes.Buffer{} + + payload := int64(t.Duration()) + + if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// UnmarshalBinary converts a from a binary.LittleEndian float64 of nanoseconds since the epoch into a UnixTime. 
+func (t *UnixTime) UnmarshalBinary(raw []byte) error { + var nanosecondsSinceEpoch int64 + + if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil { + return err + } + *t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch) + return nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go new file mode 100644 index 0000000000..12addf0ebb --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go @@ -0,0 +1,25 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "strings" + "time" +) + +// ParseTime to parse Time string to specified format. +func ParseTime(format string, t string) (d time.Time, err error) { + return time.Parse(format, strings.ToUpper(t)) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/go-autorest/autorest/error.go new file mode 100644 index 0000000000..35098eda8e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/error.go @@ -0,0 +1,103 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "net/http" +) + +const ( + // UndefinedStatusCode is used when HTTP status code is not available for an error. + UndefinedStatusCode = 0 +) + +// DetailedError encloses a error with details of the package, method, and associated HTTP +// status code (if any). +type DetailedError struct { + Original error + + // PackageType is the package type of the object emitting the error. For types, the value + // matches that produced the the '%T' format specifier of the fmt package. For other elements, + // such as functions, it is just the package name (e.g., "autorest"). + PackageType string + + // Method is the name of the method raising the error. + Method string + + // StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error. + StatusCode interface{} + + // Message is the error message. + Message string + + // Service Error is the response body of failed API in bytes + ServiceError []byte + + // Response is the response object that was returned during failure if applicable. + Response *http.Response +} + +// NewError creates a new Error conforming object from the passed packageType, method, and +// message. message is treated as a format string to which the optional args apply. 
+func NewError(packageType string, method string, message string, args ...interface{}) DetailedError { + return NewErrorWithError(nil, packageType, method, nil, message, args...) +} + +// NewErrorWithResponse creates a new Error conforming object from the passed +// packageType, method, statusCode of the given resp (UndefinedStatusCode if +// resp is nil), and message. message is treated as a format string to which the +// optional args apply. +func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { + return NewErrorWithError(nil, packageType, method, resp, message, args...) +} + +// NewErrorWithError creates a new Error conforming object from the +// passed packageType, method, statusCode of the given resp (UndefinedStatusCode +// if resp is nil), message, and original error. message is treated as a format +// string to which the optional args apply. +func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { + if v, ok := original.(DetailedError); ok { + return v + } + + statusCode := UndefinedStatusCode + if resp != nil { + statusCode = resp.StatusCode + } + + return DetailedError{ + Original: original, + PackageType: packageType, + Method: method, + StatusCode: statusCode, + Message: fmt.Sprintf(message, args...), + Response: resp, + } +} + +// Error returns a formatted containing all available details (i.e., PackageType, Method, +// StatusCode, Message, and original error (if any)). +func (e DetailedError) Error() string { + if e.Original == nil { + return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode) + } + return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original) +} + +// Unwrap returns the original error. 
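A hedged sketch of how callers typically build and unwrap the DetailedError defined above; the package type, method name, and failure cause passed in are illustrative only:

package main

import (
	"errors"
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	cause := errors.New("backend unreachable")
	resp := &http.Response{StatusCode: http.StatusBadGateway}

	// Wrap the low-level failure with package, method, and HTTP context.
	err := autorest.NewErrorWithError(cause, "example/Client", "List", resp, "listing %s failed", "widgets")
	fmt.Println(err)
	// example/Client#List: listing widgets failed: StatusCode=502 -- Original Error: backend unreachable

	// DetailedError participates in errors.Is/As via its Unwrap method.
	var de autorest.DetailedError
	if errors.As(err, &de) {
		fmt.Println("status:", de.StatusCode, "original:", errors.Unwrap(err))
	}
}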
+func (e DetailedError) Unwrap() error { + return e.Original +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.mod b/vendor/github.com/Azure/go-autorest/autorest/go.mod new file mode 100644 index 0000000000..75a534f108 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/go.mod @@ -0,0 +1,12 @@ +module github.com/Azure/go-autorest/autorest + +go 1.12 + +require ( + github.com/Azure/go-autorest v14.2.0+incompatible + github.com/Azure/go-autorest/autorest/adal v0.9.5 + github.com/Azure/go-autorest/autorest/mocks v0.4.1 + github.com/Azure/go-autorest/logger v0.2.0 + github.com/Azure/go-autorest/tracing v0.6.0 + golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 +) diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.sum b/vendor/github.com/Azure/go-autorest/autorest/go.sum new file mode 100644 index 0000000000..fa27c68d10 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/go.sum @@ -0,0 +1,23 @@ +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go new file mode 100644 index 0000000000..da65e1041e --- /dev/null +++ 
b/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go new file mode 100644 index 0000000000..98574a4155 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go @@ -0,0 +1,547 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "context" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "net/url" + "strings" +) + +const ( + mimeTypeJSON = "application/json" + mimeTypeOctetStream = "application/octet-stream" + mimeTypeFormPost = "application/x-www-form-urlencoded" + + headerAuthorization = "Authorization" + headerAuxAuthorization = "x-ms-authorization-auxiliary" + headerContentType = "Content-Type" + headerUserAgent = "User-Agent" +) + +// used as a key type in context.WithValue() +type ctxPrepareDecorators struct{} + +// WithPrepareDecorators adds the specified PrepareDecorators to the provided context. +// If no PrepareDecorators are provided the context is unchanged. +func WithPrepareDecorators(ctx context.Context, prepareDecorator []PrepareDecorator) context.Context { + if len(prepareDecorator) == 0 { + return ctx + } + return context.WithValue(ctx, ctxPrepareDecorators{}, prepareDecorator) +} + +// GetPrepareDecorators returns the PrepareDecorators in the provided context or the provided default PrepareDecorators. +func GetPrepareDecorators(ctx context.Context, defaultPrepareDecorators ...PrepareDecorator) []PrepareDecorator { + inCtx := ctx.Value(ctxPrepareDecorators{}) + if pd, ok := inCtx.([]PrepareDecorator); ok { + return pd + } + return defaultPrepareDecorators +} + +// Preparer is the interface that wraps the Prepare method. +// +// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). 
Implementations +// must ensure to not share or hold per-invocation state since Preparers may be shared and re-used. +type Preparer interface { + Prepare(*http.Request) (*http.Request, error) +} + +// PreparerFunc is a method that implements the Preparer interface. +type PreparerFunc func(*http.Request) (*http.Request, error) + +// Prepare implements the Preparer interface on PreparerFunc. +func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) { + return pf(r) +} + +// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the +// http.Request and pass it along or, first, pass the http.Request along then affect the result. +type PrepareDecorator func(Preparer) Preparer + +// CreatePreparer creates, decorates, and returns a Preparer. +// Without decorators, the returned Preparer returns the passed http.Request unmodified. +// Preparers are safe to share and re-use. +func CreatePreparer(decorators ...PrepareDecorator) Preparer { + return DecoratePreparer( + Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })), + decorators...) +} + +// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it +// applies to the Preparer. Decorators are applied in the order received, but their affect upon the +// request depends on whether they are a pre-decorator (change the http.Request and then pass it +// along) or a post-decorator (pass the http.Request along and alter it on return). +func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer { + for _, decorate := range decorators { + p = decorate(p) + } + return p +} + +// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators. +// It creates a Preparer from the decorators which it then applies to the passed http.Request. +func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) { + if r == nil { + return nil, NewError("autorest", "Prepare", "Invoked without an http.Request") + } + return CreatePreparer(decorators...).Prepare(r) +} + +// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed +// http.Request. +func WithNothing() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + return p.Prepare(r) + }) + } +} + +// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to +// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before +// adding the header. +func WithHeader(header string, value string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + setHeader(r, http.CanonicalHeaderKey(header), value) + } + return r, err + }) + } +} + +// WithHeaders returns a PrepareDecorator that sets the specified HTTP headers of the http.Request to +// the passed value. It canonicalizes the passed headers name (via http.CanonicalHeaderKey) before +// adding them. 
+func WithHeaders(headers map[string]interface{}) PrepareDecorator { + h := ensureValueStrings(headers) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.Header == nil { + r.Header = make(http.Header) + } + + for name, value := range h { + r.Header.Set(http.CanonicalHeaderKey(name), value) + } + } + return r, err + }) + } +} + +// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the supplied token. +func WithBearerAuthorization(token string) PrepareDecorator { + return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token)) +} + +// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value +// is the passed contentType. +func AsContentType(contentType string) PrepareDecorator { + return WithHeader(headerContentType, contentType) +} + +// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the +// passed string. +func WithUserAgent(ua string) PrepareDecorator { + return WithHeader(headerUserAgent, ua) +} + +// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/x-www-form-urlencoded". +func AsFormURLEncoded() PrepareDecorator { + return AsContentType(mimeTypeFormPost) +} + +// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/json". +func AsJSON() PrepareDecorator { + return AsContentType(mimeTypeJSON) +} + +// AsOctetStream returns a PrepareDecorator that adds the "application/octet-stream" Content-Type header. +func AsOctetStream() PrepareDecorator { + return AsContentType(mimeTypeOctetStream) +} + +// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The +// decorator does not validate that the passed method string is a known HTTP method. +func WithMethod(method string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r.Method = method + return p.Prepare(r) + }) + } +} + +// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE. +func AsDelete() PrepareDecorator { return WithMethod("DELETE") } + +// AsGet returns a PrepareDecorator that sets the HTTP method to GET. +func AsGet() PrepareDecorator { return WithMethod("GET") } + +// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD. +func AsHead() PrepareDecorator { return WithMethod("HEAD") } + +// AsMerge returns a PrepareDecorator that sets the HTTP method to MERGE. +func AsMerge() PrepareDecorator { return WithMethod("MERGE") } + +// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS. +func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") } + +// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH. +func AsPatch() PrepareDecorator { return WithMethod("PATCH") } + +// AsPost returns a PrepareDecorator that sets the HTTP method to POST. +func AsPost() PrepareDecorator { return WithMethod("POST") } + +// AsPut returns a PrepareDecorator that sets the HTTP method to PUT. +func AsPut() PrepareDecorator { return WithMethod("PUT") } + +// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed +// from the supplied baseUrl. Query parameters will be encoded as required. 
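Since Preparers are safe to share and re-use, a single Preparer built with CreatePreparer can decorate many requests. A hedged sketch, with a placeholder bearer token and invented URLs:

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// Build one Preparer and apply it to every outgoing request.
	p := autorest.CreatePreparer(
		autorest.WithBearerAuthorization("dummy-token"),
		autorest.WithHeaders(map[string]interface{}{
			"Accept": "application/json",
		}))

	for _, raw := range []string{"https://example.invalid/a", "https://example.invalid/b"} {
		req, err := http.NewRequest(http.MethodGet, raw, nil)
		if err != nil {
			panic(err)
		}
		if req, err = p.Prepare(req); err != nil {
			panic(err)
		}
		fmt.Println(req.Method, req.URL, req.Header.Get("Authorization"))
	}
}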
+func WithBaseURL(baseURL string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + var u *url.URL + if u, err = url.Parse(baseURL); err != nil { + return r, err + } + if u.Scheme == "" { + return r, fmt.Errorf("autorest: No scheme detected in URL %s", baseURL) + } + if u.RawQuery != "" { + q, err := url.ParseQuery(u.RawQuery) + if err != nil { + return r, err + } + u.RawQuery = q.Encode() + } + r.URL = u + } + return r, err + }) + } +} + +// WithBytes returns a PrepareDecorator that takes a list of bytes +// which passes the bytes directly to the body +func WithBytes(input *[]byte) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if input == nil { + return r, fmt.Errorf("Input Bytes was nil") + } + + r.ContentLength = int64(len(*input)) + r.Body = ioutil.NopCloser(bytes.NewReader(*input)) + } + return r, err + }) + } +} + +// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the +// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map. +func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator { + parameters := ensureValueStrings(urlParameters) + for key, value := range parameters { + baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1) + } + return WithBaseURL(baseURL) +} + +// WithFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) into the +// http.Request body. +func WithFormData(v url.Values) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + s := v.Encode() + + setHeader(r, http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost) + r.ContentLength = int64(len(s)) + r.Body = ioutil.NopCloser(strings.NewReader(s)) + } + return r, err + }) + } +} + +// WithMultiPartFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) form parameters +// into the http.Request body. +func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + var body bytes.Buffer + writer := multipart.NewWriter(&body) + for key, value := range formDataParameters { + if rc, ok := value.(io.ReadCloser); ok { + var fd io.Writer + if fd, err = writer.CreateFormFile(key, key); err != nil { + return r, err + } + if _, err = io.Copy(fd, rc); err != nil { + return r, err + } + } else { + if err = writer.WriteField(key, ensureValueString(value)); err != nil { + return r, err + } + } + } + if err = writer.Close(); err != nil { + return r, err + } + setHeader(r, http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType()) + r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) + r.ContentLength = int64(body.Len()) + return r, err + } + return r, err + }) + } +} + +// WithFile returns a PrepareDecorator that sends file in request body. 
+func WithFile(f io.ReadCloser) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := ioutil.ReadAll(f) + if err != nil { + return r, err + } + r.Body = ioutil.NopCloser(bytes.NewReader(b)) + r.ContentLength = int64(len(b)) + } + return r, err + }) + } +} + +// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request +// and sets the Content-Length header. +func WithBool(v bool) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the +// request and sets the Content-Length header. +func WithFloat32(v float32) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the +// request and sets the Content-Length header. +func WithFloat64(v float64) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request +// and sets the Content-Length header. +func WithInt32(v int32) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request +// and sets the Content-Length header. +func WithInt64(v int64) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithString returns a PrepareDecorator that encodes the passed string into the body of the request +// and sets the Content-Length header. +func WithString(v string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + r.ContentLength = int64(len(v)) + r.Body = ioutil.NopCloser(strings.NewReader(v)) + } + return r, err + }) + } +} + +// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the +// request and sets the Content-Length header. +func WithJSON(v interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := json.Marshal(v) + if err == nil { + r.ContentLength = int64(len(b)) + r.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + } + return r, err + }) + } +} + +// WithXML returns a PrepareDecorator that encodes the data passed as XML into the body of the +// request and sets the Content-Length header. +func WithXML(v interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := xml.Marshal(v) + if err == nil { + // we have to tack on an XML header + withHeader := xml.Header + string(b) + bytesWithHeader := []byte(withHeader) + + r.ContentLength = int64(len(bytesWithHeader)) + setHeader(r, headerContentLength, fmt.Sprintf("%d", len(bytesWithHeader))) + r.Body = ioutil.NopCloser(bytes.NewReader(bytesWithHeader)) + } + } + return r, err + }) + } +} + +// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path +// is absolute (that is, it begins with a "/"), it replaces the existing path. 
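A hedged sketch of composing the method, URL, content-type, and body decorators above into a single request; the base URL and payload are placeholders:

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	payload := map[string]string{"name": "example-broker"} // illustrative body

	// Compose method, base URL, Content-Type, and a JSON-encoded body
	// (WithJSON also sets ContentLength).
	req, err := autorest.Prepare(&http.Request{},
		autorest.AsPost(),
		autorest.WithBaseURL("https://example.invalid"),
		autorest.AsJSON(),
		autorest.WithJSON(payload))
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Method, req.URL, req.Header.Get("Content-Type"), req.ContentLength)
}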
+func WithPath(path string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithPath", "Invoked with a nil URL") + } + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The +// values will be escaped (aka URL encoded) before insertion into the path. +func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { + parameters := escapeValueStrings(ensureValueStrings(pathParameters)) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL") + } + for key, value := range parameters { + path = strings.Replace(path, "{"+key+"}", value, -1) + } + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. +func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { + parameters := ensureValueStrings(pathParameters) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL") + } + for key, value := range parameters { + path = strings.Replace(path, "{"+key+"}", value, -1) + } + + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +func parseURL(u *url.URL, path string) (*url.URL, error) { + p := strings.TrimRight(u.String(), "/") + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + return url.Parse(p + path) +} + +// WithQueryParameters returns a PrepareDecorators that encodes and applies the query parameters +// given in the supplied map (i.e., key=value). 
+func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator { + parameters := MapToValues(queryParameters) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL") + } + v := r.URL.Query() + for key, value := range parameters { + for i := range value { + d, err := url.QueryUnescape(value[i]) + if err != nil { + return r, err + } + value[i] = d + } + v[key] = value + } + r.URL.RawQuery = v.Encode() + } + return r, err + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go new file mode 100644 index 0000000000..349e1963a2 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/responder.go @@ -0,0 +1,269 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" +) + +// Responder is the interface that wraps the Respond method. +// +// Respond accepts and reacts to an http.Response. Implementations must ensure to not share or hold +// state since Responders may be shared and re-used. +type Responder interface { + Respond(*http.Response) error +} + +// ResponderFunc is a method that implements the Responder interface. +type ResponderFunc func(*http.Response) error + +// Respond implements the Responder interface on ResponderFunc. +func (rf ResponderFunc) Respond(r *http.Response) error { + return rf(r) +} + +// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to +// the http.Response and pass it along or, first, pass the http.Response along then react. +type RespondDecorator func(Responder) Responder + +// CreateResponder creates, decorates, and returns a Responder. Without decorators, the returned +// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share +// and re-used: It depends on the applied decorators. For example, a standard decorator that closes +// the response body is fine to share whereas a decorator that reads the body into a passed struct +// is not. +// +// To prevent memory leaks, ensure that at least one Responder closes the response body. +func CreateResponder(decorators ...RespondDecorator) Responder { + return DecorateResponder( + Responder(ResponderFunc(func(r *http.Response) error { return nil })), + decorators...) +} + +// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it +// applies to the Responder. Decorators are applied in the order received, but their affect upon the +// request depends on whether they are a pre-decorator (react to the http.Response and then pass it +// along) or a post-decorator (pass the http.Response along and then react). 
+func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder { + for _, decorate := range decorators { + r = decorate(r) + } + return r +} + +// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators. +// It creates a Responder from the decorators it then applies to the passed http.Response. +func Respond(r *http.Response, decorators ...RespondDecorator) error { + if r == nil { + return nil + } + return CreateResponder(decorators...).Respond(r) +} + +// ByIgnoring returns a RespondDecorator that ignores the passed http.Response passing it unexamined +// to the next RespondDecorator. +func ByIgnoring() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + return r.Respond(resp) + }) + } +} + +// ByCopying copies the contents of the http.Response Body into the passed bytes.Buffer as +// the Body is read. +func ByCopying(b *bytes.Buffer) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && resp != nil && resp.Body != nil { + resp.Body = TeeReadCloser(resp.Body, b) + } + return err + }) + } +} + +// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which +// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed +// Responder is invoked prior to discarding the response body, the decorator may occur anywhere +// within the set. +func ByDiscardingBody() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && resp != nil && resp.Body != nil { + if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { + return fmt.Errorf("Error discarding the response body: %v", err) + } + } + return err + }) + } +} + +// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it +// closes the response body. Since the passed Responder is invoked prior to closing the response +// body, the decorator may occur anywhere within the set. +func ByClosing() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if resp != nil && resp.Body != nil { + if err := resp.Body.Close(); err != nil { + return fmt.Errorf("Error closing the response body: %v", err) + } + } + return err + }) + } +} + +// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which +// it closes the response if the passed Responder returns an error and the response body exists. +func ByClosingIfError() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err != nil && resp != nil && resp.Body != nil { + if err := resp.Body.Close(); err != nil { + return fmt.Errorf("Error closing the response body: %v", err) + } + } + return err + }) + } +} + +// ByUnmarshallingBytes returns a RespondDecorator that copies the Bytes returned in the +// response Body into the value pointed to by v. 
+func ByUnmarshallingBytes(v *[]byte) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil { + bytes, errInner := ioutil.ReadAll(resp.Body) + if errInner != nil { + err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) + } else { + *v = bytes + } + } + return err + }) + } +} + +// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the +// response Body into the value pointed to by v. +func ByUnmarshallingJSON(v interface{}) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil { + b, errInner := ioutil.ReadAll(resp.Body) + // Some responses might include a BOM, remove for successful unmarshalling + b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) + if errInner != nil { + err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) + } else if len(strings.Trim(string(b), " ")) > 0 { + errInner = json.Unmarshal(b, v) + if errInner != nil { + err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b)) + } + } + } + return err + }) + } +} + +// ByUnmarshallingXML returns a RespondDecorator that decodes a XML document returned in the +// response Body into the value pointed to by v. +func ByUnmarshallingXML(v interface{}) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil { + b, errInner := ioutil.ReadAll(resp.Body) + if errInner != nil { + err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) + } else { + errInner = xml.Unmarshal(b, v) + if errInner != nil { + err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b)) + } + } + } + return err + }) + } +} + +// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response +// StatusCode is among the set passed. On error, response body is fully read into a buffer and +// presented in the returned error, as well as in the response body. +func WithErrorUnlessStatusCode(codes ...int) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && !ResponseHasStatusCode(resp, codes...) { + derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + if resp.Body != nil { + defer resp.Body.Close() + b, _ := ioutil.ReadAll(resp.Body) + derr.ServiceError = b + resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + err = derr + } + return err + }) + } +} + +// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is +// anything other than HTTP 200. +func WithErrorUnlessOK() RespondDecorator { + return WithErrorUnlessStatusCode(http.StatusOK) +} + +// ExtractHeader extracts all values of the specified header from the http.Response. It returns an +// empty string slice if the passed http.Response is nil or the header does not exist. 
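A usage sketch of the responder chain defined above: check the status code, unmarshal the JSON body, and always close it.

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	resp, err := http.Get("https://example.invalid/status")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	var result map[string]interface{}
	// Decorators take effect in the order listed: status check, JSON decode, body close.
	err = autorest.Respond(resp,
		autorest.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	if err != nil {
		fmt.Println("respond failed:", err)
		return
	}
	fmt.Println(result)
}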
+func ExtractHeader(header string, resp *http.Response) []string { + if resp != nil && resp.Header != nil { + return resp.Header[http.CanonicalHeaderKey(header)] + } + return nil +} + +// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It +// returns an empty string if the passed http.Response is nil or the header does not exist. +func ExtractHeaderValue(header string, resp *http.Response) string { + h := ExtractHeader(header, resp) + if len(h) > 0 { + return h[0] + } + return "" +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go new file mode 100644 index 0000000000..fa11dbed79 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go @@ -0,0 +1,52 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" +) + +// NewRetriableRequest returns a wrapper around an HTTP request that support retry logic. +func NewRetriableRequest(req *http.Request) *RetriableRequest { + return &RetriableRequest{req: req} +} + +// Request returns the wrapped HTTP request. +func (rr *RetriableRequest) Request() *http.Request { + return rr.req +} + +func (rr *RetriableRequest) prepareFromByteReader() (err error) { + // fall back to making a copy (only do this once) + b := []byte{} + if rr.req.ContentLength > 0 { + b = make([]byte, rr.req.ContentLength) + _, err = io.ReadFull(rr.req.Body, b) + if err != nil { + return err + } + } else { + b, err = ioutil.ReadAll(rr.req.Body) + if err != nil { + return err + } + } + rr.br = bytes.NewReader(b) + rr.req.Body = ioutil.NopCloser(rr.br) + return err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go new file mode 100644 index 0000000000..7143cc61b5 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go @@ -0,0 +1,54 @@ +// +build !go1.8 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package autorest + +import ( + "bytes" + "io/ioutil" + "net/http" +) + +// RetriableRequest provides facilities for retrying an HTTP request. +type RetriableRequest struct { + req *http.Request + br *bytes.Reader +} + +// Prepare signals that the request is about to be sent. 
+func (rr *RetriableRequest) Prepare() (err error) { + // preserve the request body; this is to support retry logic as + // the underlying transport will always close the reqeust body + if rr.req.Body != nil { + if rr.br != nil { + _, err = rr.br.Seek(0, 0 /*io.SeekStart*/) + rr.req.Body = ioutil.NopCloser(rr.br) + } + if err != nil { + return err + } + if rr.br == nil { + // fall back to making a copy (only do this once) + err = rr.prepareFromByteReader() + } + } + return err +} + +func removeRequestBody(req *http.Request) { + req.Body = nil + req.ContentLength = 0 +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go new file mode 100644 index 0000000000..ae15c6bf96 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go @@ -0,0 +1,66 @@ +// +build go1.8 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package autorest + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" +) + +// RetriableRequest provides facilities for retrying an HTTP request. +type RetriableRequest struct { + req *http.Request + rc io.ReadCloser + br *bytes.Reader +} + +// Prepare signals that the request is about to be sent. +func (rr *RetriableRequest) Prepare() (err error) { + // preserve the request body; this is to support retry logic as + // the underlying transport will always close the reqeust body + if rr.req.Body != nil { + if rr.rc != nil { + rr.req.Body = rr.rc + } else if rr.br != nil { + _, err = rr.br.Seek(0, io.SeekStart) + rr.req.Body = ioutil.NopCloser(rr.br) + } + if err != nil { + return err + } + if rr.req.GetBody != nil { + // this will allow us to preserve the body without having to + // make a copy. note we need to do this on each iteration + rr.rc, err = rr.req.GetBody() + if err != nil { + return err + } + } else if rr.br == nil { + // fall back to making a copy (only do this once) + err = rr.prepareFromByteReader() + } + } + return err +} + +func removeRequestBody(req *http.Request) { + req.Body = nil + req.GetBody = nil + req.ContentLength = 0 +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go new file mode 100644 index 0000000000..78610ef204 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go @@ -0,0 +1,447 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+	"log"
+	"math"
+	"net/http"
+	"net/http/cookiejar"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/Azure/go-autorest/tracing"
+)
+
+// there is one sender per TLS renegotiation type, i.e. count of tls.RenegotiationSupport enums
+const defaultSendersCount = 3
+
+type defaultSender struct {
+	sender Sender
+	init   *sync.Once
+}
+
+// each type of sender will be created on demand in sender()
+var defaultSenders [defaultSendersCount]defaultSender
+
+func init() {
+	for i := 0; i < defaultSendersCount; i++ {
+		defaultSenders[i].init = &sync.Once{}
+	}
+}
+
+// used as a key type in context.WithValue()
+type ctxSendDecorators struct{}
+
+// WithSendDecorators adds the specified SendDecorators to the provided context.
+// If no SendDecorators are provided the context is unchanged.
+func WithSendDecorators(ctx context.Context, sendDecorator []SendDecorator) context.Context {
+	if len(sendDecorator) == 0 {
+		return ctx
+	}
+	return context.WithValue(ctx, ctxSendDecorators{}, sendDecorator)
+}
+
+// GetSendDecorators returns the SendDecorators in the provided context or the provided default SendDecorators.
+func GetSendDecorators(ctx context.Context, defaultSendDecorators ...SendDecorator) []SendDecorator {
+	inCtx := ctx.Value(ctxSendDecorators{})
+	if sd, ok := inCtx.([]SendDecorator); ok {
+		return sd
+	}
+	return defaultSendDecorators
+}
+
+// Sender is the interface that wraps the Do method to send HTTP requests.
+//
+// The standard http.Client conforms to this interface.
+type Sender interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// SenderFunc is a method that implements the Sender interface.
+type SenderFunc func(*http.Request) (*http.Response, error)
+
+// Do implements the Sender interface on SenderFunc.
+func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
+	return sf(r)
+}
+
+// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then react to the
+// http.Response result.
+type SendDecorator func(Sender) Sender
+
+// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
+func CreateSender(decorators ...SendDecorator) Sender {
+	return DecorateSender(sender(tls.RenegotiateNever), decorators...)
+}
+
+// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
+// the Sender. Decorators are applied in the order received, but their effect upon the request
+// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
+// post-decorator (pass the http.Request along and react to the results in http.Response).
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
+	for _, decorate := range decorators {
+		s = decorate(s)
+	}
+	return s
+}
+
+// Send sends, by means of the default http.Client, the passed http.Request, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the http.Client before invoking the Do method.
+//
+// Send is a convenience method and not recommended for production. Advanced users should use
+// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client).
+//
+// Send will not poll or retry requests.
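A sketch of wiring a custom SendDecorator onto a plain *http.Client (which satisfies Sender); it relies on SendWithSender, defined just below.

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://example.invalid", nil)

	// A pre-decorator: stamp a header, then delegate to the wrapped Sender.
	withHeader := func(s autorest.Sender) autorest.Sender {
		return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) {
			r.Header.Set("X-Example", "demo")
			return s.Do(r)
		})
	}

	resp, err := autorest.SendWithSender(&http.Client{}, req, withHeader)
	if err != nil {
		fmt.Println("send failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}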
+func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) { + return SendWithSender(sender(tls.RenegotiateNever), r, decorators...) +} + +// SendWithSender sends the passed http.Request, through the provided Sender, returning the +// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which +// it will apply the http.Client before invoking the Do method. +// +// SendWithSender will not poll or retry requests. +func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) { + return DecorateSender(s, decorators...).Do(r) +} + +func sender(renengotiation tls.RenegotiationSupport) Sender { + // note that we can't init defaultSenders in init() since it will + // execute before calling code has had a chance to enable tracing + defaultSenders[renengotiation].init.Do(func() { + // Use behaviour compatible with DefaultTransport, but require TLS minimum version. + defaultTransport := http.DefaultTransport.(*http.Transport) + transport := &http.Transport{ + Proxy: defaultTransport.Proxy, + DialContext: defaultTransport.DialContext, + MaxIdleConns: defaultTransport.MaxIdleConns, + IdleConnTimeout: defaultTransport.IdleConnTimeout, + TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout, + ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout, + TLSClientConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + Renegotiation: renengotiation, + }, + } + var roundTripper http.RoundTripper = transport + if tracing.IsEnabled() { + roundTripper = tracing.NewTransport(transport) + } + j, _ := cookiejar.New(nil) + defaultSenders[renengotiation].sender = &http.Client{Jar: j, Transport: roundTripper} + }) + return defaultSenders[renengotiation].sender +} + +// AfterDelay returns a SendDecorator that delays for the passed time.Duration before +// invoking the Sender. The delay may be terminated by closing the optional channel on the +// http.Request. If canceled, no further Senders are invoked. +func AfterDelay(d time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + if !DelayForBackoff(d, 0, r.Context().Done()) { + return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay") + } + return s.Do(r) + }) + } +} + +// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request. +func AsIs() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return s.Do(r) + }) + } +} + +// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which +// it closes the response if the passed Sender returns an error and the response body exists. +func DoCloseIfError() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err != nil { + Respond(resp, ByDiscardingBody(), ByClosing()) + } + return resp, err + }) + } +} + +// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is +// among the set passed. Since these are artificial errors, the response body may still require +// closing. +func DoErrorIfStatusCode(codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil && ResponseHasStatusCode(resp, codes...) 
{ + err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return resp, err + }) + } +} + +// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response +// StatusCode is among the set passed. Since these are artificial errors, the response body +// may still require closing. +func DoErrorUnlessStatusCode(codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil && !ResponseHasStatusCode(resp, codes...) { + err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return resp, err + }) + } +} + +// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the +// passed status codes. It expects the http.Response to contain a Location header providing the +// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than +// the supplied duration. It will delay between requests for the duration specified in the +// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by +// closing the optional channel on the http.Request. +func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + resp, err = s.Do(r) + + if err == nil && ResponseHasStatusCode(resp, codes...) { + r, err = NewPollingRequestWithContext(r.Context(), resp) + + for err == nil && ResponseHasStatusCode(resp, codes...) { + Respond(resp, + ByDiscardingBody(), + ByClosing()) + resp, err = SendWithSender(s, r, + AfterDelay(GetRetryAfter(resp, delay))) + } + } + + return resp, err + }) + } +} + +// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified +// number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on +// the http.Request. +func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + for attempt := 0; attempt < attempts; attempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + DrainResponseBody(resp) + resp, err = s.Do(rr.Request()) + if err == nil { + return resp, err + } + if !DelayForBackoff(backoff, attempt, r.Context().Done()) { + return nil, r.Context().Err() + } + } + return resp, err + }) + } +} + +// Count429AsRetry indicates that a 429 response should be included as a retry attempt. +var Count429AsRetry = true + +// Max429Delay is the maximum duration to wait between retries on a 429 if no Retry-After header was received. +var Max429Delay time.Duration + +// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified +// number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). Retrying may be canceled by cancelling the context on the http.Request. +// NOTE: Code http.StatusTooManyRequests (429) will *not* be counted against the number of attempts. 
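A retry sketch using the decorators above: retry transport-level failures with exponential backoff, then turn any non-200 status into an error.

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://example.invalid", nil)

	resp, err := autorest.SendWithSender(&http.Client{}, req,
		// Retries only when Do returns an error (e.g. a transient network failure).
		autorest.DoRetryForAttempts(3, 2*time.Second),
		// Listed last, so it inspects the final response of the retry loop.
		autorest.DoErrorUnlessStatusCode(http.StatusOK))
	if err != nil {
		fmt.Println("send failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}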
+func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return doRetryForStatusCodesImpl(s, r, Count429AsRetry, attempts, backoff, 0, codes...) + }) + } +} + +// DoRetryForStatusCodesWithCap returns a SendDecorator that retries for specified statusCodes for up to the +// specified number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). To cap the maximum possible delay between iterations specify a value greater +// than zero for cap. Retrying may be canceled by cancelling the context on the http.Request. +func DoRetryForStatusCodesWithCap(attempts int, backoff, cap time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return doRetryForStatusCodesImpl(s, r, Count429AsRetry, attempts, backoff, cap, codes...) + }) + } +} + +func doRetryForStatusCodesImpl(s Sender, r *http.Request, count429 bool, attempts int, backoff, cap time.Duration, codes ...int) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + // Increment to add the first call (attempts denotes number of retries) + for attempt, delayCount := 0, 0; attempt < attempts+1; { + err = rr.Prepare() + if err != nil { + return + } + DrainResponseBody(resp) + resp, err = s.Do(rr.Request()) + // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication + // resp and err will both have a value, so in this case we don't want to retry as it will never succeed. + if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) { + return resp, err + } + delayed := DelayWithRetryAfter(resp, r.Context().Done()) + // if this was a 429 set the delay cap as specified. + // applicable only in the absence of a retry-after header. + if resp != nil && resp.StatusCode == http.StatusTooManyRequests { + cap = Max429Delay + } + if !delayed && !DelayForBackoffWithCap(backoff, cap, delayCount, r.Context().Done()) { + return resp, r.Context().Err() + } + // when count429 == false don't count a 429 against the number + // of attempts so that we continue to retry until it succeeds + if count429 || (resp == nil || resp.StatusCode != http.StatusTooManyRequests) { + attempt++ + } + // delay count is tracked separately from attempts to + // ensure that 429 participates in exponential back-off + delayCount++ + } + return resp, err +} + +// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header. +// The value of Retry-After can be either the number of seconds or a date in RFC1123 format. +// The function returns true after successfully waiting for the specified duration. If there is +// no Retry-After header or the wait is cancelled the return value is false. 
+func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool { + if resp == nil { + return false + } + var dur time.Duration + ra := resp.Header.Get("Retry-After") + if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 { + dur = time.Duration(retryAfter) * time.Second + } else if t, err := time.Parse(time.RFC1123, ra); err == nil { + dur = t.Sub(time.Now()) + } + if dur > 0 { + select { + case <-time.After(dur): + return true + case <-cancel: + return false + } + } + return false +} + +// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal +// to or greater than the specified duration, exponentially backing off between requests using the +// supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the +// optional channel on the http.Request. +func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + end := time.Now().Add(d) + for attempt := 0; time.Now().Before(end); attempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + DrainResponseBody(resp) + resp, err = s.Do(rr.Request()) + if err == nil { + return resp, err + } + if !DelayForBackoff(backoff, attempt, r.Context().Done()) { + return nil, r.Context().Err() + } + } + return resp, err + }) + } +} + +// WithLogging returns a SendDecorator that implements simple before and after logging of the +// request. +func WithLogging(logger *log.Logger) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + logger.Printf("Sending %s %s", r.Method, r.URL) + resp, err := s.Do(r) + if err != nil { + logger.Printf("%s %s received error '%v'", r.Method, r.URL, err) + } else { + logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status) + } + return resp, err + }) + } +} + +// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of +// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set +// to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early, +// returns false. +// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt +// count. +func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool { + return DelayForBackoffWithCap(backoff, 0, attempt, cancel) +} + +// DelayForBackoffWithCap invokes time.After for the supplied backoff duration raised to the power of +// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set +// to zero for no delay. To cap the maximum possible delay specify a value greater than zero for cap. +// The delay may be canceled by closing the passed channel. If terminated early, returns false. +// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt +// count. 
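A standalone sketch of the exponential backoff helper defined above: with a one-second base, attempt 0 waits about 1s, attempt 1 about 2s, attempt 2 about 4s.

package main

import (
	"fmt"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	cancel := make(chan struct{})
	for attempt := 0; attempt < 3; attempt++ {
		// Waits backoff * 2^attempt; returns false if cancel is closed first.
		if !autorest.DelayForBackoff(time.Second, attempt, cancel) {
			fmt.Println("backoff canceled")
			return
		}
		fmt.Println("finished waiting for attempt", attempt)
	}
}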
+func DelayForBackoffWithCap(backoff, cap time.Duration, attempt int, cancel <-chan struct{}) bool { + d := time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second + if cap > 0 && d > cap { + d = cap + } + select { + case <-time.After(d): + return true + case <-cancel: + return false + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go new file mode 100644 index 0000000000..3467b8fa60 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/utility.go @@ -0,0 +1,232 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "reflect" + "strings" +) + +// EncodedAs is a series of constants specifying various data encodings +type EncodedAs string + +const ( + // EncodedAsJSON states that data is encoded as JSON + EncodedAsJSON EncodedAs = "JSON" + + // EncodedAsXML states that data is encoded as Xml + EncodedAsXML EncodedAs = "XML" +) + +// Decoder defines the decoding method json.Decoder and xml.Decoder share +type Decoder interface { + Decode(v interface{}) error +} + +// NewDecoder creates a new decoder appropriate to the passed encoding. +// encodedAs specifies the type of encoding and r supplies the io.Reader containing the +// encoded data. +func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder { + if encodedAs == EncodedAsJSON { + return json.NewDecoder(r) + } else if encodedAs == EncodedAsXML { + return xml.NewDecoder(r) + } + return nil +} + +// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy +// is especially useful if there is a chance the data will fail to decode. +// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v +// is the decoding destination. +func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) { + b := bytes.Buffer{} + return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) +} + +// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc. +// It utilizes io.TeeReader to copy the data read and has the same behavior when reading. +// Further, when it is closed, it ensures that rc is closed as well. 
+func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser { + return &teeReadCloser{rc, io.TeeReader(rc, w)} +} + +type teeReadCloser struct { + rc io.ReadCloser + r io.Reader +} + +func (t *teeReadCloser) Read(p []byte) (int, error) { + return t.r.Read(p) +} + +func (t *teeReadCloser) Close() error { + return t.rc.Close() +} + +func containsInt(ints []int, n int) bool { + for _, i := range ints { + if i == n { + return true + } + } + return false +} + +func escapeValueStrings(m map[string]string) map[string]string { + for key, value := range m { + m[key] = url.QueryEscape(value) + } + return m +} + +func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string { + mapOfStrings := make(map[string]string) + for key, value := range mapOfInterface { + mapOfStrings[key] = ensureValueString(value) + } + return mapOfStrings +} + +func ensureValueString(value interface{}) string { + if value == nil { + return "" + } + switch v := value.(type) { + case string: + return v + case []byte: + return string(v) + default: + return fmt.Sprintf("%v", v) + } +} + +// MapToValues method converts map[string]interface{} to url.Values. +func MapToValues(m map[string]interface{}) url.Values { + v := url.Values{} + for key, value := range m { + x := reflect.ValueOf(value) + if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { + for i := 0; i < x.Len(); i++ { + v.Add(key, ensureValueString(x.Index(i))) + } + } else { + v.Add(key, ensureValueString(value)) + } + } + return v +} + +// AsStringSlice method converts interface{} to []string. +// s must be of type slice or array or an error is returned. +// Each element of s will be converted to its string representation. +func AsStringSlice(s interface{}) ([]string, error) { + v := reflect.ValueOf(s) + if v.Kind() != reflect.Slice && v.Kind() != reflect.Array { + return nil, NewError("autorest", "AsStringSlice", "the value's type is not a slice or array.") + } + stringSlice := make([]string, 0, v.Len()) + + for i := 0; i < v.Len(); i++ { + stringSlice = append(stringSlice, fmt.Sprintf("%v", v.Index(i))) + } + return stringSlice, nil +} + +// String method converts interface v to string. If interface is a list, it +// joins list elements using the separator. Note that only sep[0] will be used for +// joining if any separator is specified. +func String(v interface{}, sep ...string) string { + if len(sep) == 0 { + return ensureValueString(v) + } + stringSlice, ok := v.([]string) + if ok == false { + var err error + stringSlice, err = AsStringSlice(v) + if err != nil { + panic(fmt.Sprintf("autorest: Couldn't convert value to a string %s.", err)) + } + } + return ensureValueString(strings.Join(stringSlice, sep[0])) +} + +// Encode method encodes url path and query parameters. +func Encode(location string, v interface{}, sep ...string) string { + s := String(v, sep...) + switch strings.ToLower(location) { + case "path": + return pathEscape(s) + case "query": + return queryEscape(s) + default: + return s + } +} + +func pathEscape(s string) string { + return strings.Replace(url.QueryEscape(s), "+", "%20", -1) +} + +func queryEscape(s string) string { + return url.QueryEscape(s) +} + +// ChangeToGet turns the specified http.Request into a GET (it assumes it wasn't). +// This is mainly useful for long-running operations that use the Azure-AsyncOperation +// header, so we change the initial PUT into a GET to retrieve the final result. 
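A quick sketch of the exported string and encoding helpers above:

package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// String joins slices using the first separator only.
	fmt.Println(autorest.String([]string{"a", "b c"}, ",")) // a,b c

	// Encode escapes for the given parameter location.
	fmt.Println(autorest.Encode("path", "a b/c"))  // a%20b%2Fc
	fmt.Println(autorest.Encode("query", "a b&c")) // a+b%26c

	// MapToValues expands slice values into repeated url.Values entries.
	v := autorest.MapToValues(map[string]interface{}{"tags": []string{"x", "y"}})
	fmt.Println(v.Encode()) // tags=x&tags=y
}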
+func ChangeToGet(req *http.Request) *http.Request { + req.Method = "GET" + req.Body = nil + req.ContentLength = 0 + req.Header.Del("Content-Length") + return req +} + +// IsTemporaryNetworkError returns true if the specified error is a temporary network error or false +// if it's not. If the error doesn't implement the net.Error interface the return value is true. +func IsTemporaryNetworkError(err error) bool { + if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) { + return true + } + return false +} + +// DrainResponseBody reads the response body then closes it. +func DrainResponseBody(resp *http.Response) error { + if resp != nil && resp.Body != nil { + _, err := io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + return err + } + return nil +} + +func setHeader(r *http.Request, key, value string) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set(key, value) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility_1.13.go b/vendor/github.com/Azure/go-autorest/autorest/utility_1.13.go new file mode 100644 index 0000000000..4cb5e6849f --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/utility_1.13.go @@ -0,0 +1,29 @@ +// +build go1.13 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package autorest + +import ( + "errors" + + "github.com/Azure/go-autorest/autorest/adal" +) + +// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError interface. +func IsTokenRefreshError(err error) bool { + var tre adal.TokenRefreshError + return errors.As(err, &tre) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility_legacy.go b/vendor/github.com/Azure/go-autorest/autorest/utility_legacy.go new file mode 100644 index 0000000000..ebb51b4f53 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/utility_legacy.go @@ -0,0 +1,31 @@ +// +build !go1.13 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package autorest + +import "github.com/Azure/go-autorest/autorest/adal" + +// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError +// interface. If err is a DetailedError it will walk the chain of Original errors. 
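And a small sketch of the response and network helpers above, DrainResponseBody and IsTemporaryNetworkError:

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	resp, err := http.Get("https://example.invalid")
	if err != nil {
		// Consults net.Error.Temporary() when the error implements net.Error.
		fmt.Println("temporary:", autorest.IsTemporaryNetworkError(err))
		return
	}
	// Read the remaining body and close it so the connection can be reused.
	if err := autorest.DrainResponseBody(resp); err != nil {
		fmt.Println("drain failed:", err)
	}
}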
+func IsTokenRefreshError(err error) bool { + if _, ok := err.(adal.TokenRefreshError); ok { + return true + } + if de, ok := err.(DetailedError); ok { + return IsTokenRefreshError(de.Original) + } + return false +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go new file mode 100644 index 0000000000..713e23581d --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -0,0 +1,41 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "runtime" +) + +const number = "v14.2.1" + +var ( + userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + number, + ) +) + +// UserAgent returns a string containing the Go version, system architecture and OS, and the go-autorest version. +func UserAgent() string { + return userAgent +} + +// Version returns the semantic version (see http://semver.org). +func Version() string { + return number +} diff --git a/vendor/github.com/Azure/go-autorest/azure-pipelines.yml b/vendor/github.com/Azure/go-autorest/azure-pipelines.yml new file mode 100644 index 0000000000..6fb8404fd0 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/azure-pipelines.yml @@ -0,0 +1,105 @@ +variables: + GOPATH: '$(system.defaultWorkingDirectory)/work' + sdkPath: '$(GOPATH)/src/github.com/$(build.repository.name)' + +jobs: + - job: 'goautorest' + displayName: 'Run go-autorest CI Checks' + + strategy: + matrix: + Linux_Go113: + vm.image: 'ubuntu-18.04' + go.version: '1.13' + Linux_Go114: + vm.image: 'ubuntu-18.04' + go.version: '1.14' + + pool: + vmImage: '$(vm.image)' + + steps: + - task: GoTool@0 + inputs: + version: '$(go.version)' + displayName: "Select Go Version" + + - script: | + set -e + mkdir -p '$(GOPATH)/bin' + mkdir -p '$(sdkPath)' + shopt -s extglob + mv !(work) '$(sdkPath)' + echo '##vso[task.prependpath]$(GOPATH)/bin' + displayName: 'Create Go Workspace' + + - script: | + set -e + curl -sSL https://raw.githubusercontent.com/golang/dep/master/install.sh | sh + dep ensure -v + go install ./vendor/golang.org/x/lint/golint + go get github.com/jstemmer/go-junit-report + go get github.com/axw/gocov/gocov + go get github.com/AlekSi/gocov-xml + go get -u github.com/matm/gocov-html + workingDirectory: '$(sdkPath)' + displayName: 'Install Dependencies' + + - script: | + go vet ./autorest/... + go vet ./logger/... + go vet ./tracing/... + workingDirectory: '$(sdkPath)' + displayName: 'Vet' + + - script: | + go build -v ./autorest/... + go build -v ./logger/... + go build -v ./tracing/... + workingDirectory: '$(sdkPath)' + displayName: 'Build' + + - script: | + set -e + go test -race -v -coverprofile=coverage.txt -covermode atomic ./autorest/... ./logger/... ./tracing/... 
2>&1 | go-junit-report > report.xml + gocov convert coverage.txt > coverage.json + gocov-xml < coverage.json > coverage.xml + gocov-html < coverage.json > coverage.html + workingDirectory: '$(sdkPath)' + displayName: 'Run Tests' + + - script: grep -L -r --include *.go --exclude-dir vendor -P "Copyright (\d{4}|\(c\)) Microsoft" ./ | tee >&2 + workingDirectory: '$(sdkPath)' + displayName: 'Copyright Header Check' + failOnStderr: true + condition: succeededOrFailed() + + - script: | + gofmt -s -l -w ./autorest/. >&2 + gofmt -s -l -w ./logger/. >&2 + gofmt -s -l -w ./tracing/. >&2 + workingDirectory: '$(sdkPath)' + displayName: 'Format Check' + failOnStderr: true + condition: succeededOrFailed() + + - script: | + golint ./autorest/... >&2 + golint ./logger/... >&2 + golint ./tracing/... >&2 + workingDirectory: '$(sdkPath)' + displayName: 'Linter Check' + failOnStderr: true + condition: succeededOrFailed() + + - task: PublishTestResults@2 + inputs: + testRunner: JUnit + testResultsFiles: $(sdkPath)/report.xml + failTaskOnFailedTests: true + + - task: PublishCodeCoverageResults@1 + inputs: + codeCoverageTool: Cobertura + summaryFileLocation: $(sdkPath)/coverage.xml + additionalCodeCoverageFiles: $(sdkPath)/coverage.html diff --git a/vendor/github.com/Azure/go-autorest/doc.go b/vendor/github.com/Azure/go-autorest/doc.go new file mode 100644 index 0000000000..99ae6ca988 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/doc.go @@ -0,0 +1,18 @@ +/* +Package go-autorest provides an HTTP request client for use with Autorest-generated API client packages. +*/ +package go_autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/logger/LICENSE b/vendor/github.com/Azure/go-autorest/logger/LICENSE new file mode 100644 index 0000000000..b9d6a27ea9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/logger/go.mod b/vendor/github.com/Azure/go-autorest/logger/go.mod new file mode 100644 index 0000000000..bedeaee039 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/go.mod @@ -0,0 +1,5 @@ +module github.com/Azure/go-autorest/logger + +go 1.12 + +require github.com/Azure/go-autorest v14.2.0+incompatible diff --git a/vendor/github.com/Azure/go-autorest/logger/go.sum b/vendor/github.com/Azure/go-autorest/logger/go.sum new file mode 100644 index 0000000000..1fc56a962e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/go.sum @@ -0,0 +1,2 @@ +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= diff --git a/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go new file mode 100644 index 0000000000..0aa27680db --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package logger + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/logger/logger.go b/vendor/github.com/Azure/go-autorest/logger/logger.go new file mode 100644 index 0000000000..da09f394c5 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/logger.go @@ -0,0 +1,328 @@ +package logger + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "strings" + "sync" + "time" +) + +// LevelType tells a logger the minimum level to log. When code reports a log entry, +// the LogLevel indicates the level of the log entry. The logger only records entries +// whose level is at least the level it was told to log. See the Log* constants. +// For example, if a logger is configured with LogError, then LogError, LogPanic, +// and LogFatal entries will be logged; lower level entries are ignored. 
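A minimal sketch (not part of the patch) of how the level ordering described above behaves, assuming the vendored import path github.com/Azure/go-autorest/logger and the ParseLevel helper and Log* constants defined just below:

package main

import (
	"fmt"

	"github.com/Azure/go-autorest/logger"
)

func main() {
	// Pick the threshold the same way the package does for the
	// AZURE_GO_SDK_LOG_LEVEL environment variable (case-insensitive).
	configured, err := logger.ParseLevel("error")
	if err != nil {
		panic(err)
	}
	entries := []logger.LevelType{logger.LogDebug, logger.LogInfo, logger.LogError, logger.LogFatal}
	for _, entry := range entries {
		// An entry is recorded only when the configured level is at or above
		// the entry's level (LogFatal < LogPanic < ... < LogDebug).
		fmt.Printf("%-7s logged: %v\n", entry, configured >= entry)
	}
}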
+type LevelType uint32 + +const ( + // LogNone tells a logger not to log any entries passed to it. + LogNone LevelType = iota + + // LogFatal tells a logger to log all LogFatal entries passed to it. + LogFatal + + // LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it. + LogPanic + + // LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it. + LogError + + // LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogWarning + + // LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogInfo + + // LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogDebug +) + +const ( + logNone = "NONE" + logFatal = "FATAL" + logPanic = "PANIC" + logError = "ERROR" + logWarning = "WARNING" + logInfo = "INFO" + logDebug = "DEBUG" + logUnknown = "UNKNOWN" +) + +// ParseLevel converts the specified string into the corresponding LevelType. +func ParseLevel(s string) (lt LevelType, err error) { + switch strings.ToUpper(s) { + case logFatal: + lt = LogFatal + case logPanic: + lt = LogPanic + case logError: + lt = LogError + case logWarning: + lt = LogWarning + case logInfo: + lt = LogInfo + case logDebug: + lt = LogDebug + default: + err = fmt.Errorf("bad log level '%s'", s) + } + return +} + +// String implements the stringer interface for LevelType. +func (lt LevelType) String() string { + switch lt { + case LogNone: + return logNone + case LogFatal: + return logFatal + case LogPanic: + return logPanic + case LogError: + return logError + case LogWarning: + return logWarning + case LogInfo: + return logInfo + case LogDebug: + return logDebug + default: + return logUnknown + } +} + +// Filter defines functions for filtering HTTP request/response content. +type Filter struct { + // URL returns a potentially modified string representation of a request URL. + URL func(u *url.URL) string + + // Header returns a potentially modified set of values for the specified key. + // To completely exclude the header key/values return false. + Header func(key string, val []string) (bool, []string) + + // Body returns a potentially modified request/response body. + Body func(b []byte) []byte +} + +func (f Filter) processURL(u *url.URL) string { + if f.URL == nil { + return u.String() + } + return f.URL(u) +} + +func (f Filter) processHeader(k string, val []string) (bool, []string) { + if f.Header == nil { + return true, val + } + return f.Header(k, val) +} + +func (f Filter) processBody(b []byte) []byte { + if f.Body == nil { + return b + } + return f.Body(b) +} + +// Writer defines methods for writing to a logging facility. +type Writer interface { + // Writeln writes the specified message with the standard log entry header and new-line character. + Writeln(level LevelType, message string) + + // Writef writes the specified format specifier with the standard log entry header and no new-line character. + Writef(level LevelType, format string, a ...interface{}) + + // WriteRequest writes the specified HTTP request to the logger if the log level is greater than + // or equal to LogInfo. The request body, if set, is logged at level LogDebug or higher. + // Custom filters can be specified to exclude URL, header, and/or body content from the log. + // By default no request content is excluded. 
+ WriteRequest(req *http.Request, filter Filter) + + // WriteResponse writes the specified HTTP response to the logger if the log level is greater than + // or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher. + // Custom filters can be specified to exclude URL, header, and/or body content from the log. + // By default no response content is excluded. + WriteResponse(resp *http.Response, filter Filter) +} + +// Instance is the default log writer initialized during package init. +// This can be replaced with a custom implementation as required. +var Instance Writer + +// default log level +var logLevel = LogNone + +// Level returns the value specified in AZURE_GO_AUTOREST_LOG_LEVEL. +// If no value was specified the default value is LogNone. +// Custom loggers can call this to retrieve the configured log level. +func Level() LevelType { + return logLevel +} + +func init() { + // separated for testing purposes + initDefaultLogger() +} + +func initDefaultLogger() { + // init with nilLogger so callers don't have to do a nil check on Default + Instance = nilLogger{} + llStr := strings.ToLower(os.Getenv("AZURE_GO_SDK_LOG_LEVEL")) + if llStr == "" { + return + } + var err error + logLevel, err = ParseLevel(llStr) + if err != nil { + fmt.Fprintf(os.Stderr, "go-autorest: failed to parse log level: %s\n", err.Error()) + return + } + if logLevel == LogNone { + return + } + // default to stderr + dest := os.Stderr + lfStr := os.Getenv("AZURE_GO_SDK_LOG_FILE") + if strings.EqualFold(lfStr, "stdout") { + dest = os.Stdout + } else if lfStr != "" { + lf, err := os.Create(lfStr) + if err == nil { + dest = lf + } else { + fmt.Fprintf(os.Stderr, "go-autorest: failed to create log file, using stderr: %s\n", err.Error()) + } + } + Instance = fileLogger{ + logLevel: logLevel, + mu: &sync.Mutex{}, + logFile: dest, + } +} + +// the nil logger does nothing +type nilLogger struct{} + +func (nilLogger) Writeln(LevelType, string) {} + +func (nilLogger) Writef(LevelType, string, ...interface{}) {} + +func (nilLogger) WriteRequest(*http.Request, Filter) {} + +func (nilLogger) WriteResponse(*http.Response, Filter) {} + +// A File is used instead of a Logger so the stream can be flushed after every write. 
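The Filter hooks and the package-level Instance described above can be combined to log requests with sensitive headers redacted; a minimal sketch (not part of the vendored file), assuming AZURE_GO_SDK_LOG_LEVEL has been set so that a file logger is installed at init:

package main

import (
	"net/http"

	"github.com/Azure/go-autorest/logger"
)

func main() {
	req, err := http.NewRequest(http.MethodGet, "https://example.invalid/resource", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer secret-token")

	// Replace the Authorization value before it reaches the log; every other
	// header passes through unchanged.
	redact := logger.Filter{
		Header: func(key string, val []string) (bool, []string) {
			if key == "Authorization" {
				return true, []string{"**REDACTED**"}
			}
			return true, val
		},
	}

	// Instance is the package-level Writer set up during init; with the default
	// nilLogger (no AZURE_GO_SDK_LOG_LEVEL set) this call is a no-op.
	logger.Instance.WriteRequest(req, redact)
}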
+type fileLogger struct { + logLevel LevelType + mu *sync.Mutex // for synchronizing writes to logFile + logFile *os.File +} + +func (fl fileLogger) Writeln(level LevelType, message string) { + fl.Writef(level, "%s\n", message) +} + +func (fl fileLogger) Writef(level LevelType, format string, a ...interface{}) { + if fl.logLevel >= level { + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprintf(fl.logFile, "%s %s", entryHeader(level), fmt.Sprintf(format, a...)) + fl.logFile.Sync() + } +} + +func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) { + if req == nil || fl.logLevel < LogInfo { + return + } + b := &bytes.Buffer{} + fmt.Fprintf(b, "%s REQUEST: %s %s\n", entryHeader(LogInfo), req.Method, filter.processURL(req.URL)) + // dump headers + for k, v := range req.Header { + if ok, mv := filter.processHeader(k, v); ok { + fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) + } + } + if fl.shouldLogBody(req.Header, req.Body) { + // dump body + body, err := ioutil.ReadAll(req.Body) + if err == nil { + fmt.Fprintln(b, string(filter.processBody(body))) + if nc, ok := req.Body.(io.Seeker); ok { + // rewind to the beginning + nc.Seek(0, io.SeekStart) + } else { + // recreate the body + req.Body = ioutil.NopCloser(bytes.NewReader(body)) + } + } else { + fmt.Fprintf(b, "failed to read body: %v\n", err) + } + } + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprint(fl.logFile, b.String()) + fl.logFile.Sync() +} + +func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) { + if resp == nil || fl.logLevel < LogInfo { + return + } + b := &bytes.Buffer{} + fmt.Fprintf(b, "%s RESPONSE: %d %s\n", entryHeader(LogInfo), resp.StatusCode, filter.processURL(resp.Request.URL)) + // dump headers + for k, v := range resp.Header { + if ok, mv := filter.processHeader(k, v); ok { + fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) + } + } + if fl.shouldLogBody(resp.Header, resp.Body) { + // dump body + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err == nil { + fmt.Fprintln(b, string(filter.processBody(body))) + resp.Body = ioutil.NopCloser(bytes.NewReader(body)) + } else { + fmt.Fprintf(b, "failed to read body: %v\n", err) + } + } + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprint(fl.logFile, b.String()) + fl.logFile.Sync() +} + +// returns true if the provided body should be included in the log +func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool { + ct := header.Get("Content-Type") + return fl.logLevel >= LogDebug && body != nil && !strings.Contains(ct, "application/octet-stream") +} + +// creates standard header for log entries, it contains a timestamp and the log level +func entryHeader(level LevelType) string { + // this format provides a fixed number of digits so the size of the timestamp is constant + return fmt.Sprintf("(%s) %s:", time.Now().Format("2006-01-02T15:04:05.0000000Z07:00"), level.String()) +} diff --git a/vendor/github.com/Azure/go-autorest/tracing/LICENSE b/vendor/github.com/Azure/go-autorest/tracing/LICENSE new file mode 100644 index 0000000000..b9d6a27ea9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/tracing/go.mod b/vendor/github.com/Azure/go-autorest/tracing/go.mod new file mode 100644 index 0000000000..a2cdec78c8 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/go.mod @@ -0,0 +1,5 @@ +module github.com/Azure/go-autorest/tracing + +go 1.12 + +require github.com/Azure/go-autorest v14.2.0+incompatible diff --git a/vendor/github.com/Azure/go-autorest/tracing/go.sum b/vendor/github.com/Azure/go-autorest/tracing/go.sum new file mode 100644 index 0000000000..1fc56a962e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/go.sum @@ -0,0 +1,2 @@ +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= diff --git a/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go new file mode 100644 index 0000000000..e163975cd4 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package tracing + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/tracing/tracing.go b/vendor/github.com/Azure/go-autorest/tracing/tracing.go new file mode 100644 index 0000000000..0e7a6e9625 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/tracing.go @@ -0,0 +1,67 @@ +package tracing + +// Copyright 2018 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "net/http" +) + +// Tracer represents an HTTP tracing facility. +type Tracer interface { + NewTransport(base *http.Transport) http.RoundTripper + StartSpan(ctx context.Context, name string) context.Context + EndSpan(ctx context.Context, httpStatusCode int, err error) +} + +var ( + tracer Tracer +) + +// Register will register the provided Tracer. Pass nil to unregister a Tracer. +func Register(t Tracer) { + tracer = t +} + +// IsEnabled returns true if a Tracer has been registered. 
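A minimal sketch (not part of the vendored file) of plugging a custom Tracer into this package via Register, assuming the vendored import path github.com/Azure/go-autorest/tracing; the no-op implementation is illustrative only:

package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/tracing"
)

// noopTracer satisfies tracing.Tracer without recording anything.
type noopTracer struct{}

func (noopTracer) NewTransport(base *http.Transport) http.RoundTripper        { return base }
func (noopTracer) StartSpan(ctx context.Context, name string) context.Context { return ctx }
func (noopTracer) EndSpan(ctx context.Context, httpStatusCode int, err error) {}

func main() {
	tracing.Register(noopTracer{})
	fmt.Println("tracing enabled:", tracing.IsEnabled()) // true

	// The span calls are no-ops here, but the pattern matches an instrumented tracer.
	ctx := tracing.StartSpan(context.Background(), "example.operation")
	tracing.EndSpan(ctx, http.StatusOK, nil)
}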
+func IsEnabled() bool { + return tracer != nil +} + +// NewTransport creates a new instrumenting http.RoundTripper for the +// registered Tracer. If no Tracer has been registered it returns nil. +func NewTransport(base *http.Transport) http.RoundTripper { + if tracer != nil { + return tracer.NewTransport(base) + } + return nil +} + +// StartSpan starts a trace span with the specified name, associating it with the +// provided context. Has no effect if a Tracer has not been registered. +func StartSpan(ctx context.Context, name string) context.Context { + if tracer != nil { + return tracer.StartSpan(ctx, name) + } + return ctx +} + +// EndSpan ends a previously started span stored in the context. +// Has no effect if a Tracer has not been registered. +func EndSpan(ctx context.Context, httpStatusCode int, err error) { + if tracer != nil { + tracer.EndSpan(ctx, httpStatusCode, err) + } +} diff --git a/vendor/github.com/Shopify/sarama/.golangci.yml b/vendor/github.com/Shopify/sarama/.golangci.yml index 09e5c468cb..f92a7167c2 100644 --- a/vendor/github.com/Shopify/sarama/.golangci.yml +++ b/vendor/github.com/Shopify/sarama/.golangci.yml @@ -40,6 +40,7 @@ linters: - bodyclose - deadcode - depguard + - exportloopref - dogsled # - dupl - errcheck @@ -55,8 +56,9 @@ linters: # - gosimple - govet # - ineffassign - # - misspell + - misspell # - nakedret + - nilerr # - scopelint - staticcheck - structcheck diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md index 59ccd1de58..dd826e273d 100644 --- a/vendor/github.com/Shopify/sarama/CHANGELOG.md +++ b/vendor/github.com/Shopify/sarama/CHANGELOG.md @@ -2,6 +2,10 @@ #### Unreleased +# Improvements + +- #1912 - @faillefer Support for --delete-offsets for consumer group topic + #### Version 1.28.0 (2021-02-15) **Note that with this release we change `RoundRobinBalancer` strategy to match Java client behavior. See #1788 for details.** diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/Shopify/sarama/Makefile index 4714d7798f..8f8fc6bdb0 100644 --- a/vendor/github.com/Shopify/sarama/Makefile +++ b/vendor/github.com/Shopify/sarama/Makefile @@ -2,7 +2,7 @@ default: fmt get update test lint GO := go GOBUILD := CGO_ENABLED=0 $(GO) build $(BUILD_FLAG) -GOTEST := $(GO) test -gcflags='-l' -p 3 -v -race -timeout 6m -coverprofile=profile.out -covermode=atomic +GOTEST := $(GO) test -gcflags='-l' -p 3 -v -race -timeout 10m -coverprofile=profile.out -covermode=atomic FILES := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -not -name '*_test.go') TESTS := $(shell find . 
-name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -name '*_test.go') diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md index f2beb73931..f66550a8b0 100644 --- a/vendor/github.com/Shopify/sarama/README.md +++ b/vendor/github.com/Shopify/sarama/README.md @@ -1,8 +1,7 @@ # sarama [![Go Reference](https://pkg.go.dev/badge/github.com/Shopify/sarama.svg)](https://pkg.go.dev/github.com/Shopify/sarama) -[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama) -[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/master/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama) +[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/main/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama) Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later). @@ -20,7 +19,7 @@ You might also want to look at the [Frequently Asked Questions](https://github.c Sarama provides a "2 releases + 2 months" compatibility guarantee: we support the two latest stable releases of Kafka and Go, and we provide a two month grace period for older releases. This means we currently officially support -Go 1.15 through 1.16, and Kafka 2.6 through 2.8, although older releases are +Go 1.15 through 1.16, and Kafka 2.7 through 2.8, although older releases are still likely to work. Sarama follows semantic versioning and provides API stability via the gopkg.in service. @@ -29,7 +28,7 @@ A changelog is available [here](CHANGELOG.md). ## Contributing -- Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md). +- Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/main/.github/CONTRIBUTING.md). - Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more technical and design details. - The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) contains a wealth of useful information. - For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers. diff --git a/vendor/github.com/Shopify/sarama/admin.go b/vendor/github.com/Shopify/sarama/admin.go index abe18b19f0..59fd9983e1 100644 --- a/vendor/github.com/Shopify/sarama/admin.go +++ b/vendor/github.com/Shopify/sarama/admin.go @@ -95,6 +95,9 @@ type ClusterAdmin interface { // List the consumer group offsets available in the cluster. ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) + // Deletes a consumer group offset + DeleteConsumerGroupOffset(group string, topic string, partition int32) error + // Delete a consumer group. DeleteConsumerGroup(group string) error @@ -113,6 +116,18 @@ type ClusterAdmin interface { // Upsert SCRAM users UpsertUserScramCredentials(upsert []AlterUserScramCredentialsUpsert) ([]*AlterUserScramCredentialsResult, error) + // Get client quota configurations corresponding to the specified filter. + // This operation is supported by brokers with version 2.6.0.0 or higher. + DescribeClientQuotas(components []QuotaFilterComponent, strict bool) ([]DescribeClientQuotasEntry, error) + + // Alters client quota configurations with the specified alterations. + // This operation is supported by brokers with version 2.6.0.0 or higher. 
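The DeleteConsumerGroupOffset and DescribeClientQuotas additions above can be driven through a ClusterAdmin; a minimal, hypothetical sketch assuming a broker reachable at localhost:9092 and example group/topic names:

package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_6_0_0 // the client quota APIs need brokers >= 2.6.0.0

	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer admin.Close()

	// Describe all client quotas: empty/nil components slice with strict = false.
	entries, err := admin.DescribeClientQuotas(nil, false)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found %d client quota entries\n", len(entries))

	// Drop the stored offset for partition 0 of one topic for a consumer group.
	if err := admin.DeleteConsumerGroupOffset("example-group", "example-topic", 0); err != nil {
		log.Fatal(err)
	}
}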
+ AlterClientQuotas(entity []QuotaEntityComponent, op ClientQuotasOp, validateOnly bool) error + + // Controller returns the cluster controller broker. It will return a + // locally cached value if it's available. + Controller() (*Broker, error) + // Close shuts down the admin and closes underlying client. Close() error } @@ -443,6 +458,7 @@ func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [ request := &CreatePartitionsRequest{ TopicPartitions: topicPartitions, Timeout: ca.conf.Admin.Timeout, + ValidateOnly: validateOnly, } return ca.retryOnError(isErrNoController, func() error { @@ -874,6 +890,34 @@ func (ca *clusterAdmin) ListConsumerGroupOffsets(group string, topicPartitions m return coordinator.FetchOffset(request) } +func (ca *clusterAdmin) DeleteConsumerGroupOffset(group string, topic string, partition int32) error { + coordinator, err := ca.client.Coordinator(group) + if err != nil { + return err + } + + request := &DeleteOffsetsRequest{ + Group: group, + partitions: map[string][]int32{ + topic: {partition}, + }, + } + + resp, err := coordinator.DeleteOffsets(request) + if err != nil { + return err + } + + if resp.ErrorCode != ErrNoError { + return resp.ErrorCode + } + + if resp.Errors[topic][partition] != ErrNoError { + return resp.Errors[topic][partition] + } + return nil +} + func (ca *clusterAdmin) DeleteConsumerGroup(group string) error { coordinator, err := ca.client.Coordinator(group) if err != nil { @@ -1003,3 +1047,62 @@ func (ca *clusterAdmin) AlterUserScramCredentials(u []AlterUserScramCredentialsU return rsp.Results, nil } + +// Describe All : use an empty/nil components slice + strict = false +// Contains components: strict = false +// Contains only components: strict = true +func (ca *clusterAdmin) DescribeClientQuotas(components []QuotaFilterComponent, strict bool) ([]DescribeClientQuotasEntry, error) { + request := &DescribeClientQuotasRequest{ + Components: components, + Strict: strict, + } + + b, err := ca.Controller() + if err != nil { + return nil, err + } + + rsp, err := b.DescribeClientQuotas(request) + if err != nil { + return nil, err + } + + if rsp.ErrorMsg != nil { + return nil, errors.New(*rsp.ErrorMsg) + } + if rsp.ErrorCode != ErrNoError { + return nil, rsp.ErrorCode + } + + return rsp.Entries, nil +} + +func (ca *clusterAdmin) AlterClientQuotas(entity []QuotaEntityComponent, op ClientQuotasOp, validateOnly bool) error { + entry := AlterClientQuotasEntry{ + Entity: entity, + Ops: []ClientQuotasOp{op}, + } + + request := &AlterClientQuotasRequest{ + Entries: []AlterClientQuotasEntry{entry}, + ValidateOnly: validateOnly, + } + + b, err := ca.Controller() + if err != nil { + return err + } + + rsp, err := b.AlterClientQuotas(request) + if err != nil { + return err + } + + for _, entry := range rsp.Entries { + if entry.ErrorCode != ErrNoError { + return entry.ErrorCode + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/alter_client_quotas_request.go b/vendor/github.com/Shopify/sarama/alter_client_quotas_request.go new file mode 100644 index 0000000000..f528512d02 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/alter_client_quotas_request.go @@ -0,0 +1,194 @@ +package sarama + +// AlterClientQuotas Request (Version: 0) => [entries] validate_only +// entries => [entity] [ops] +// entity => entity_type entity_name +// entity_type => STRING +// entity_name => NULLABLE_STRING +// ops => key value remove +// key => STRING +// value => FLOAT64 +// remove => BOOLEAN +// validate_only => BOOLEAN + +type 
AlterClientQuotasRequest struct { + Entries []AlterClientQuotasEntry // The quota configuration entries to alter. + ValidateOnly bool // Whether the alteration should be validated, but not performed. +} + +type AlterClientQuotasEntry struct { + Entity []QuotaEntityComponent // The quota entity to alter. + Ops []ClientQuotasOp // An individual quota configuration entry to alter. +} + +type ClientQuotasOp struct { + Key string // The quota configuration key. + Value float64 // The value to set, otherwise ignored if the value is to be removed. + Remove bool // Whether the quota configuration value should be removed, otherwise set. +} + +func (a *AlterClientQuotasRequest) encode(pe packetEncoder) error { + // Entries + if err := pe.putArrayLength(len(a.Entries)); err != nil { + return err + } + for _, e := range a.Entries { + if err := e.encode(pe); err != nil { + return err + } + } + + // ValidateOnly + pe.putBool(a.ValidateOnly) + + return nil +} + +func (a *AlterClientQuotasRequest) decode(pd packetDecoder, version int16) error { + // Entries + entryCount, err := pd.getArrayLength() + if err != nil { + return err + } + if entryCount > 0 { + a.Entries = make([]AlterClientQuotasEntry, entryCount) + for i := range a.Entries { + e := AlterClientQuotasEntry{} + if err = e.decode(pd, version); err != nil { + return err + } + a.Entries[i] = e + } + } else { + a.Entries = []AlterClientQuotasEntry{} + } + + // ValidateOnly + validateOnly, err := pd.getBool() + if err != nil { + return err + } + a.ValidateOnly = validateOnly + + return nil +} + +func (a *AlterClientQuotasEntry) encode(pe packetEncoder) error { + // Entity + if err := pe.putArrayLength(len(a.Entity)); err != nil { + return err + } + for _, component := range a.Entity { + if err := component.encode(pe); err != nil { + return err + } + } + + // Ops + if err := pe.putArrayLength(len(a.Ops)); err != nil { + return err + } + for _, o := range a.Ops { + if err := o.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (a *AlterClientQuotasEntry) decode(pd packetDecoder, version int16) error { + // Entity + componentCount, err := pd.getArrayLength() + if err != nil { + return err + } + if componentCount > 0 { + a.Entity = make([]QuotaEntityComponent, componentCount) + for i := 0; i < componentCount; i++ { + component := QuotaEntityComponent{} + if err := component.decode(pd, version); err != nil { + return err + } + a.Entity[i] = component + } + } else { + a.Entity = []QuotaEntityComponent{} + } + + // Ops + opCount, err := pd.getArrayLength() + if err != nil { + return err + } + if opCount > 0 { + a.Ops = make([]ClientQuotasOp, opCount) + for i := range a.Ops { + c := ClientQuotasOp{} + if err = c.decode(pd, version); err != nil { + return err + } + a.Ops[i] = c + } + } else { + a.Ops = []ClientQuotasOp{} + } + + return nil +} + +func (c *ClientQuotasOp) encode(pe packetEncoder) error { + // Key + if err := pe.putString(c.Key); err != nil { + return err + } + + // Value + pe.putFloat64(c.Value) + + // Remove + pe.putBool(c.Remove) + + return nil +} + +func (c *ClientQuotasOp) decode(pd packetDecoder, version int16) error { + // Key + key, err := pd.getString() + if err != nil { + return err + } + c.Key = key + + // Value + value, err := pd.getFloat64() + if err != nil { + return err + } + c.Value = value + + // Remove + remove, err := pd.getBool() + if err != nil { + return err + } + c.Remove = remove + + return nil +} + +func (a *AlterClientQuotasRequest) key() int16 { + return 49 +} + +func (a *AlterClientQuotasRequest) 
version() int16 { + return 0 +} + +func (a *AlterClientQuotasRequest) headerVersion() int16 { + return 1 +} + +func (a *AlterClientQuotasRequest) requiredVersion() KafkaVersion { + return V2_6_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/alter_client_quotas_response.go b/vendor/github.com/Shopify/sarama/alter_client_quotas_response.go new file mode 100644 index 0000000000..ccd27d5f5e --- /dev/null +++ b/vendor/github.com/Shopify/sarama/alter_client_quotas_response.go @@ -0,0 +1,145 @@ +package sarama + +import ( + "time" +) + +// AlterClientQuotas Response (Version: 0) => throttle_time_ms [entries] +// throttle_time_ms => INT32 +// entries => error_code error_message [entity] +// error_code => INT16 +// error_message => NULLABLE_STRING +// entity => entity_type entity_name +// entity_type => STRING +// entity_name => NULLABLE_STRING + +type AlterClientQuotasResponse struct { + ThrottleTime time.Duration // The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. + Entries []AlterClientQuotasEntryResponse // The quota configuration entries altered. +} + +type AlterClientQuotasEntryResponse struct { + ErrorCode KError // The error code, or `0` if the quota alteration succeeded. + ErrorMsg *string // The error message, or `null` if the quota alteration succeeded. + Entity []QuotaEntityComponent // The quota entity altered. +} + +func (a *AlterClientQuotasResponse) encode(pe packetEncoder) error { + // ThrottleTime + pe.putInt32(int32(a.ThrottleTime / time.Millisecond)) + + // Entries + if err := pe.putArrayLength(len(a.Entries)); err != nil { + return err + } + for _, e := range a.Entries { + if err := e.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (a *AlterClientQuotasResponse) decode(pd packetDecoder, version int16) error { + // ThrottleTime + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + // Entries + entryCount, err := pd.getArrayLength() + if err != nil { + return err + } + if entryCount > 0 { + a.Entries = make([]AlterClientQuotasEntryResponse, entryCount) + for i := range a.Entries { + e := AlterClientQuotasEntryResponse{} + if err = e.decode(pd, version); err != nil { + return err + } + a.Entries[i] = e + } + } else { + a.Entries = []AlterClientQuotasEntryResponse{} + } + + return nil +} + +func (a *AlterClientQuotasEntryResponse) encode(pe packetEncoder) error { + // ErrorCode + pe.putInt16(int16(a.ErrorCode)) + + // ErrorMsg + if err := pe.putNullableString(a.ErrorMsg); err != nil { + return err + } + + // Entity + if err := pe.putArrayLength(len(a.Entity)); err != nil { + return err + } + for _, component := range a.Entity { + if err := component.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (a *AlterClientQuotasEntryResponse) decode(pd packetDecoder, version int16) error { + // ErrorCode + errCode, err := pd.getInt16() + if err != nil { + return err + } + a.ErrorCode = KError(errCode) + + // ErrorMsg + errMsg, err := pd.getNullableString() + if err != nil { + return err + } + a.ErrorMsg = errMsg + + // Entity + componentCount, err := pd.getArrayLength() + if err != nil { + return err + } + if componentCount > 0 { + a.Entity = make([]QuotaEntityComponent, componentCount) + for i := 0; i < componentCount; i++ { + component := QuotaEntityComponent{} + if err := component.decode(pd, version); err != nil { + return err + } + a.Entity[i] = component + } 
+ } else { + a.Entity = []QuotaEntityComponent{} + } + + return nil +} + +func (a *AlterClientQuotasResponse) key() int16 { + return 49 +} + +func (a *AlterClientQuotasResponse) version() int16 { + return 0 +} + +func (a *AlterClientQuotasResponse) headerVersion() int16 { + return 0 +} + +func (a *AlterClientQuotasResponse) requiredVersion() KafkaVersion { + return V2_6_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/alter_configs_response.go b/vendor/github.com/Shopify/sarama/alter_configs_response.go index cfb6369ac1..84cd86c729 100644 --- a/vendor/github.com/Shopify/sarama/alter_configs_response.go +++ b/vendor/github.com/Shopify/sarama/alter_configs_response.go @@ -61,12 +61,12 @@ func (a *AlterConfigsResourceResponse) encode(pe packetEncoder) error { pe.putInt16(a.ErrorCode) err := pe.putString(a.ErrorMsg) if err != nil { - return nil + return err } pe.putInt8(int8(a.Type)) err = pe.putString(a.Name) if err != nil { - return nil + return err } return nil } diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go index bee92c0e7f..e5b3baf646 100644 --- a/vendor/github.com/Shopify/sarama/api_versions_request.go +++ b/vendor/github.com/Shopify/sarama/api_versions_request.go @@ -1,28 +1,69 @@ package sarama -// ApiVersionsRequest ... -type ApiVersionsRequest struct{} +const defaultClientSoftwareName = "sarama" + +type ApiVersionsRequest struct { + // Version defines the protocol version to use for encode and decode + Version int16 + // ClientSoftwareName contains the name of the client. + ClientSoftwareName string + // ClientSoftwareVersion contains the version of the client. + ClientSoftwareVersion string +} + +func (r *ApiVersionsRequest) encode(pe packetEncoder) (err error) { + if r.Version >= 3 { + if err := pe.putCompactString(r.ClientSoftwareName); err != nil { + return err + } + if err := pe.putCompactString(r.ClientSoftwareVersion); err != nil { + return err + } + pe.putEmptyTaggedFieldArray() + } -func (a *ApiVersionsRequest) encode(pe packetEncoder) error { return nil } -func (a *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) { +func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + if r.Version >= 3 { + if r.ClientSoftwareName, err = pd.getCompactString(); err != nil { + return err + } + if r.ClientSoftwareVersion, err = pd.getCompactString(); err != nil { + return err + } + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + return nil } -func (a *ApiVersionsRequest) key() int16 { +func (r *ApiVersionsRequest) key() int16 { return 18 } -func (a *ApiVersionsRequest) version() int16 { - return 0 +func (r *ApiVersionsRequest) version() int16 { + return r.Version } -func (a *ApiVersionsRequest) headerVersion() int16 { +func (r *ApiVersionsRequest) headerVersion() int16 { + if r.Version >= 3 { + return 2 + } return 1 } -func (a *ApiVersionsRequest) requiredVersion() KafkaVersion { - return V0_10_0_0 +func (r *ApiVersionsRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 0: + return V0_10_0_0 + case 3: + return V2_4_0_0 + default: + return V0_10_0_0 + } } diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go index 0e72e3926a..ade911c597 100644 --- a/vendor/github.com/Shopify/sarama/api_versions_response.go +++ b/vendor/github.com/Shopify/sarama/api_versions_response.go @@ -1,76 +1,130 @@ package sarama -// 
ApiVersionsResponseBlock is an api version response block type -type ApiVersionsResponseBlock struct { - ApiKey int16 +// ApiVersionsResponseKey contains the APIs supported by the broker. +type ApiVersionsResponseKey struct { + // Version defines the protocol version to use for encode and decode + Version int16 + // ApiKey contains the API index. + ApiKey int16 + // MinVersion contains the minimum supported version, inclusive. MinVersion int16 + // MaxVersion contains the maximum supported version, inclusive. MaxVersion int16 } -func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error { - pe.putInt16(b.ApiKey) - pe.putInt16(b.MinVersion) - pe.putInt16(b.MaxVersion) +func (a *ApiVersionsResponseKey) encode(pe packetEncoder, version int16) (err error) { + a.Version = version + pe.putInt16(a.ApiKey) + + pe.putInt16(a.MinVersion) + + pe.putInt16(a.MaxVersion) + + if version >= 3 { + pe.putEmptyTaggedFieldArray() + } + return nil } -func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error { - var err error - - if b.ApiKey, err = pd.getInt16(); err != nil { +func (a *ApiVersionsResponseKey) decode(pd packetDecoder, version int16) (err error) { + a.Version = version + if a.ApiKey, err = pd.getInt16(); err != nil { return err } - if b.MinVersion, err = pd.getInt16(); err != nil { + if a.MinVersion, err = pd.getInt16(); err != nil { return err } - if b.MaxVersion, err = pd.getInt16(); err != nil { + if a.MaxVersion, err = pd.getInt16(); err != nil { return err } + if version >= 3 { + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + return nil } -// ApiVersionsResponse is an api version response type type ApiVersionsResponse struct { - Err KError - ApiVersions []*ApiVersionsResponseBlock + // Version defines the protocol version to use for encode and decode + Version int16 + // ErrorCode contains the top-level error code. + ErrorCode int16 + // ApiKeys contains the APIs supported by the broker. + ApiKeys []ApiVersionsResponseKey + // ThrottleTimeMs contains the duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. 
+ ThrottleTimeMs int32 } -func (r *ApiVersionsResponse) encode(pe packetEncoder) error { - pe.putInt16(int16(r.Err)) - if err := pe.putArrayLength(len(r.ApiVersions)); err != nil { - return err +func (r *ApiVersionsResponse) encode(pe packetEncoder) (err error) { + pe.putInt16(r.ErrorCode) + + if r.Version >= 3 { + pe.putCompactArrayLength(len(r.ApiKeys)) + } else { + if err := pe.putArrayLength(len(r.ApiKeys)); err != nil { + return err + } } - for _, apiVersion := range r.ApiVersions { - if err := apiVersion.encode(pe); err != nil { + for _, block := range r.ApiKeys { + if err := block.encode(pe, r.Version); err != nil { return err } } + + if r.Version >= 1 { + pe.putInt32(r.ThrottleTimeMs) + } + + if r.Version >= 3 { + pe.putEmptyTaggedFieldArray() + } + return nil } -func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error { - kerr, err := pd.getInt16() - if err != nil { +func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + if r.ErrorCode, err = pd.getInt16(); err != nil { return err } - r.Err = KError(kerr) + var numApiKeys int + if r.Version >= 3 { + numApiKeys, err = pd.getCompactArrayLength() + if err != nil { + return err + } + } else { + numApiKeys, err = pd.getArrayLength() + if err != nil { + return err + } + } + r.ApiKeys = make([]ApiVersionsResponseKey, numApiKeys) + for i := 0; i < numApiKeys; i++ { + var block ApiVersionsResponseKey + if err = block.decode(pd, r.Version); err != nil { + return err + } + r.ApiKeys[i] = block + } - numBlocks, err := pd.getArrayLength() - if err != nil { - return err + if r.Version >= 1 { + if r.ThrottleTimeMs, err = pd.getInt32(); err != nil { + return err + } } - r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks) - for i := 0; i < numBlocks; i++ { - block := new(ApiVersionsResponseBlock) - if err := block.decode(pd); err != nil { + if r.Version >= 3 { + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { return err } - r.ApiVersions[i] = block } return nil @@ -81,13 +135,22 @@ func (r *ApiVersionsResponse) key() int16 { } func (r *ApiVersionsResponse) version() int16 { - return 0 + return r.Version } -func (a *ApiVersionsResponse) headerVersion() int16 { +func (r *ApiVersionsResponse) headerVersion() int16 { + // ApiVersionsResponse always includes a v0 header. + // See KIP-511 for details return 0 } func (r *ApiVersionsResponse) requiredVersion() KafkaVersion { - return V0_10_0_0 + switch r.Version { + case 0: + return V0_10_0_0 + case 3: + return V2_4_0_0 + default: + return V0_10_0_0 + } } diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go index 5911f7b58c..c807aea720 100644 --- a/vendor/github.com/Shopify/sarama/async_producer.go +++ b/vendor/github.com/Shopify/sarama/async_producer.go @@ -14,8 +14,8 @@ import ( // to the correct broker for the provided topic-partition, refreshing metadata as appropriate, // and parses responses for errors. You must read from the Errors() channel or the // producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid -// leaks: it will not be garbage-collected automatically when it passes out of -// scope. +// leaks and message lost: it will not be garbage-collected automatically when it passes +// out of scope and buffered messages may not be flushed. type AsyncProducer interface { // AsyncClose triggers a shutdown of the producer. 
The shutdown has completed @@ -26,7 +26,8 @@ type AsyncProducer interface { // Close shuts down the producer and waits for any buffered messages to be // flushed. You must call this function before a producer object passes out of - // scope, as it may otherwise leak memory. You must call this before calling + // scope, as it may otherwise leak memory. You must call this before process + // shutting down, or you may lose messages. You must call this before calling // Close on the underlying client. Close() error @@ -206,7 +207,7 @@ type ProducerMessage struct { // Partition is the partition that the message was sent to. This is only // guaranteed to be defined if the message was successfully delivered. Partition int32 - // Timestamp can vary in behaviour depending on broker configuration, being + // Timestamp can vary in behavior depending on broker configuration, being // in either one of the CreateTime or LogAppendTime modes (default CreateTime), // and requiring version at least 0.10.0. // diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go index dd01e4ef1f..ed92a669d8 100644 --- a/vendor/github.com/Shopify/sarama/broker.go +++ b/vendor/github.com/Shopify/sarama/broker.go @@ -49,6 +49,7 @@ type Broker struct { brokerResponseRate metrics.Meter brokerResponseSize metrics.Histogram brokerRequestsInFlight metrics.Counter + brokerThrottleTime metrics.Histogram kerberosAuthenticator GSSAPIKerberosAuth } @@ -149,11 +150,28 @@ func (b *Broker) Open(conf *Config) error { return err } + usingApiVersionsRequests := conf.Version.IsAtLeast(V2_4_0_0) && conf.ApiVersionsRequest + b.lock.Lock() go withRecover(func() { - defer b.lock.Unlock() - + defer func() { + b.lock.Unlock() + + // Send an ApiVersionsRequest to identify the client (KIP-511). 
+ // Ideally Sarama would use the response to control protocol versions, + // but for now just fire-and-forget just to send + if usingApiVersionsRequests { + _, err = b.ApiVersions(&ApiVersionsRequest{ + Version: 3, + ClientSoftwareName: defaultClientSoftwareName, + ClientSoftwareVersion: version(), + }) + if err != nil { + Logger.Printf("Error while sending ApiVersionsRequest to broker %s: %s\n", b.addr, err) + } + } + }() dialer := conf.getDialer() b.conn, b.connErr = dialer.Dial("tcp", b.addr) if b.connErr != nil { @@ -180,7 +198,7 @@ func (b *Broker) Open(conf *Config) error { b.requestsInFlight = metrics.GetOrRegisterCounter("requests-in-flight", conf.MetricRegistry) // Do not gather metrics for seeded broker (only used during bootstrap) because they share // the same id (-1) and are already exposed through the global metrics above - if b.id >= 0 { + if b.id >= 0 && !metrics.UseNilMetrics { b.registerMetrics() } @@ -190,7 +208,7 @@ func (b *Broker) Open(conf *Config) error { if b.connErr != nil { err = b.conn.Close() if err == nil { - Logger.Printf("Closed connection to broker %s\n", b.addr) + DebugLogger.Printf("Closed connection to broker %s\n", b.addr) } else { Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) } @@ -204,9 +222,9 @@ func (b *Broker) Open(conf *Config) error { b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1) if b.id >= 0 { - Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id) + DebugLogger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id) } else { - Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr) + DebugLogger.Printf("Connected to broker at %s (unregistered)\n", b.addr) } go withRecover(b.responseReceiver) }) @@ -245,7 +263,7 @@ func (b *Broker) Close() error { b.unregisterMetrics() if err == nil { - Logger.Printf("Closed connection to broker %s\n", b.addr) + DebugLogger.Printf("Closed connection to broker %s\n", b.addr) } else { Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) } @@ -336,6 +354,15 @@ func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) { } else { response = new(ProduceResponse) err = b.sendAndReceive(request, response) + if response.ThrottleTime != time.Duration(0) { + DebugLogger.Printf( + "producer/broker/%d ProduceResponse throttled %v\n", + b.ID(), response.ThrottleTime) + if b.brokerThrottleTime != nil { + throttleTimeInMs := int64(response.ThrottleTime / time.Millisecond) + b.brokerThrottleTime.Update(throttleTimeInMs) + } + } } if err != nil { @@ -689,6 +716,17 @@ func (b *Broker) DeleteGroups(request *DeleteGroupsRequest) (*DeleteGroupsRespon return response, nil } +// DeleteOffsets sends a request to delete group offsets and returns a response or error +func (b *Broker) DeleteOffsets(request *DeleteOffsetsRequest) (*DeleteOffsetsResponse, error) { + response := new(DeleteOffsetsResponse) + + if err := b.sendAndReceive(request, response); err != nil { + return nil, err + } + + return response, nil +} + // DescribeLogDirs sends a request to get the broker's log dir paths and sizes func (b *Broker) DescribeLogDirs(request *DescribeLogDirsRequest) (*DescribeLogDirsResponse, error) { response := new(DescribeLogDirsResponse) @@ -724,6 +762,30 @@ func (b *Broker) AlterUserScramCredentials(req *AlterUserScramCredentialsRequest return res, nil } +// DescribeClientQuotas sends a request to get the broker's quotas +func (b *Broker) DescribeClientQuotas(request 
*DescribeClientQuotasRequest) (*DescribeClientQuotasResponse, error) { + response := new(DescribeClientQuotasResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +// AlterClientQuotas sends a request to alter the broker's quotas +func (b *Broker) AlterClientQuotas(request *AlterClientQuotasRequest) (*AlterClientQuotasResponse, error) { + response := new(AlterClientQuotasResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + // readFull ensures the conn ReadDeadline has been setup before making a // call to io.ReadFull func (b *Broker) readFull(buf []byte) (n int, err error) { @@ -941,7 +1003,7 @@ func (b *Broker) authenticateViaSASL() error { case SASLTypeOAuth: return b.sendAndReceiveSASLOAuth(b.conf.Net.SASL.TokenProvider) case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512: - return b.sendAndReceiveSASLSCRAMv1() + return b.sendAndReceiveSASLSCRAM() case SASLTypeGSSAPI: return b.sendAndReceiveKerberos() default: @@ -1009,7 +1071,7 @@ func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int return res.Err } - Logger.Print("Successful SASL handshake. Available mechanisms: ", res.EnabledMechanisms) + DebugLogger.Print("Successful SASL handshake. Available mechanisms: ", res.EnabledMechanisms) return nil } @@ -1082,7 +1144,7 @@ func (b *Broker) sendAndReceiveV0SASLPlainAuth() error { return err } - Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header) + DebugLogger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header) return nil } @@ -1180,6 +1242,70 @@ func (b *Broker) sendClientMessage(message []byte) (bool, error) { return isChallenge, err } +func (b *Broker) sendAndReceiveSASLSCRAM() error { + if b.conf.Net.SASL.Version == SASLHandshakeV0 { + return b.sendAndReceiveSASLSCRAMv0() + } + return b.sendAndReceiveSASLSCRAMv1() +} + +func (b *Broker) sendAndReceiveSASLSCRAMv0() error { + if err := b.sendAndReceiveSASLHandshake(b.conf.Net.SASL.Mechanism, SASLHandshakeV0); err != nil { + return err + } + + scramClient := b.conf.Net.SASL.SCRAMClientGeneratorFunc() + if err := scramClient.Begin(b.conf.Net.SASL.User, b.conf.Net.SASL.Password, b.conf.Net.SASL.SCRAMAuthzID); err != nil { + return fmt.Errorf("failed to start SCRAM exchange with the server: %s", err.Error()) + } + + msg, err := scramClient.Step("") + if err != nil { + return fmt.Errorf("failed to advance the SCRAM exchange: %s", err.Error()) + } + + for !scramClient.Done() { + requestTime := time.Now() + // Will be decremented in updateIncomingCommunicationMetrics (except error) + b.addRequestInFlightMetrics(1) + length := len(msg) + authBytes := make([]byte, length+4) //4 byte length header + auth data + binary.BigEndian.PutUint32(authBytes, uint32(length)) + copy(authBytes[4:], []byte(msg)) + _, err := b.write(authBytes) + b.updateOutgoingCommunicationMetrics(length + 4) + if err != nil { + b.addRequestInFlightMetrics(-1) + Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error()) + return err + } + b.correlationID++ + header := make([]byte, 4) + _, err = b.readFull(header) + if err != nil { + b.addRequestInFlightMetrics(-1) + Logger.Printf("Failed to read response header while authenticating with SASL to broker %s: %s\n", b.addr, err.Error()) + return err + } + payload := make([]byte, int32(binary.BigEndian.Uint32(header))) + n, err := b.readFull(payload) + if err != nil { 
+ b.addRequestInFlightMetrics(-1) + Logger.Printf("Failed to read response payload while authenticating with SASL to broker %s: %s\n", b.addr, err.Error()) + return err + } + b.updateIncomingCommunicationMetrics(n+4, time.Since(requestTime)) + msg, err = scramClient.Step(string(payload)) + if err != nil { + Logger.Println("SASL authentication failed", err) + return err + } + } + + DebugLogger.Println("SASL authentication succeeded") + return nil +} + func (b *Broker) sendAndReceiveSASLSCRAMv1() error { if err := b.sendAndReceiveSASLHandshake(b.conf.Net.SASL.Mechanism, SASLHandshakeV1); err != nil { return err @@ -1224,7 +1350,7 @@ func (b *Broker) sendAndReceiveSASLSCRAMv1() error { } } - Logger.Println("SASL authentication succeeded") + DebugLogger.Println("SASL authentication succeeded") return nil } @@ -1428,6 +1554,7 @@ func (b *Broker) registerMetrics() { b.brokerResponseRate = b.registerMeter("response-rate") b.brokerResponseSize = b.registerHistogram("response-size") b.brokerRequestsInFlight = b.registerCounter("requests-in-flight") + b.brokerThrottleTime = b.registerHistogram("throttle-time-in-ms") } func (b *Broker) unregisterMetrics() { diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go index c0918ba355..1c69cb8d60 100644 --- a/vendor/github.com/Shopify/sarama/client.go +++ b/vendor/github.com/Shopify/sarama/client.go @@ -138,7 +138,7 @@ type client struct { // and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot // be retrieved from any of the given broker addresses, the client is not created. func NewClient(addrs []string, conf *Config) (Client, error) { - Logger.Println("Initializing new client") + DebugLogger.Println("Initializing new client") if conf == nil { conf = NewConfig() @@ -182,7 +182,7 @@ func NewClient(addrs []string, conf *Config) (Client, error) { } go withRecover(client.backgroundMetadataUpdater) - Logger.Println("Successfully initialized new client") + DebugLogger.Println("Successfully initialized new client") return client, nil } @@ -213,11 +213,12 @@ func (client *client) Broker(brokerID int32) (*Broker, error) { } func (client *client) InitProducerID() (*InitProducerIDResponse, error) { - var err error + err := ErrOutOfBrokers for broker := client.any(); broker != nil; broker = client.any() { + var response *InitProducerIDResponse req := &InitProducerIDRequest{} - response, err := broker.InitProducerID(req) + response, err = broker.InitProducerID(req) switch err.(type) { case nil: return response, nil @@ -228,6 +229,7 @@ func (client *client) InitProducerID() (*InitProducerIDResponse, error) { client.deregisterBroker(broker) } } + return nil, err } @@ -245,7 +247,7 @@ func (client *client) Close() error { client.lock.Lock() defer client.lock.Unlock() - Logger.Println("Closing Client") + DebugLogger.Println("Closing Client") for _, broker := range client.brokers { safeAsyncClose(broker) @@ -612,7 +614,7 @@ func (client *client) updateBroker(brokers []*Broker) { currentBroker[broker.ID()] = broker if client.brokers[broker.ID()] == nil { // add new broker client.brokers[broker.ID()] = broker - Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr()) + DebugLogger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr()) } else if broker.Addr() != client.brokers[broker.ID()].Addr() { // replace broker with new address safeAsyncClose(client.brokers[broker.ID()]) client.brokers[broker.ID()] = broker @@ 
-640,7 +642,7 @@ func (client *client) registerBroker(broker *Broker) { if client.brokers[broker.ID()] == nil { client.brokers[broker.ID()] = broker - Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr()) + DebugLogger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr()) } else if broker.Addr() != client.brokers[broker.ID()].Addr() { safeAsyncClose(client.brokers[broker.ID()]) client.brokers[broker.ID()] = broker @@ -662,7 +664,7 @@ func (client *client) deregisterBroker(broker *Broker) { // but we really shouldn't have to; once that loop is made better this case can be // removed, and the function generally can be renamed from `deregisterBroker` to // `nextSeedBroker` or something - Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr()) + DebugLogger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr()) delete(client.brokers, broker.ID()) } } @@ -876,12 +878,12 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, broker := client.any() for ; broker != nil && !pastDeadline(0); broker = client.any() { - allowAutoTopicCreation := true + allowAutoTopicCreation := client.conf.Metadata.AllowAutoTopicCreation if len(topics) > 0 { - Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr) + DebugLogger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr) } else { allowAutoTopicCreation = false - Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr) + DebugLogger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr) } req := &MetadataRequest{Topics: topics, AllowAutoTopicCreation: allowAutoTopicCreation} @@ -1045,7 +1047,7 @@ func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemainin } for broker := client.any(); broker != nil; broker = client.any() { - Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr()) + DebugLogger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr()) request := new(FindCoordinatorRequest) request.CoordinatorKey = consumerGroup @@ -1067,7 +1069,7 @@ func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemainin switch response.Err { case ErrNoError: - Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr()) + DebugLogger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr()) return response, nil case ErrConsumerCoordinatorNotAvailable: diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go index 43e739cad9..3f86f1fb9c 100644 --- a/vendor/github.com/Shopify/sarama/config.go +++ b/vendor/github.com/Shopify/sarama/config.go @@ -4,7 +4,7 @@ import ( "compress/gzip" "crypto/tls" "fmt" - "io/ioutil" + "io" "net" "regexp" "time" @@ -148,6 +148,11 @@ type Config struct { // `Net.[Dial|Read]Timeout * BrokerCount * (Metadata.Retry.Max + 1) + Metadata.Retry.Backoff * Metadata.Retry.Max` // to fail. Timeout time.Duration + + // Whether to allow auto-create topics in metadata refresh. 
If set to true, + // the broker may auto-create topics that we requested which do not already exist, + // if it is configured to do so (`auto.create.topics.enable` is true). Defaults to true. + AllowAutoTopicCreation bool } // Producer is the namespace for configuration related to producing messages, @@ -422,6 +427,11 @@ type Config struct { // in the background while user code is working, greatly improving throughput. // Defaults to 256. ChannelBufferSize int + // ApiVersionsRequest determines whether Sarama should send an + // ApiVersionsRequest message to each broker as part of its initial + // connection. This defaults to `true` to match the official Java client + // and most 3rdparty ones. + ApiVersionsRequest bool // The version of Kafka that Sarama will assume it is running against. // Defaults to the oldest supported stable version. Since Kafka provides // backwards-compatibility, setting it to a version older than you have @@ -456,6 +466,7 @@ func NewConfig() *Config { c.Metadata.Retry.Backoff = 250 * time.Millisecond c.Metadata.RefreshFrequency = 10 * time.Minute c.Metadata.Full = true + c.Metadata.AllowAutoTopicCreation = true c.Producer.MaxMessageBytes = 1000000 c.Producer.RequiredAcks = WaitForLocal @@ -486,6 +497,7 @@ func NewConfig() *Config { c.ClientID = defaultClientID c.ChannelBufferSize = 256 + c.ApiVersionsRequest = true c.Version = DefaultVersion c.MetricRegistry = metrics.NewRegistry() @@ -663,7 +675,7 @@ func (c *Config) Validate() error { if c.Producer.Compression == CompressionGZIP { if c.Producer.CompressionLevel != CompressionLevelDefault { - if _, err := gzip.NewWriterLevel(ioutil.Discard, c.Producer.CompressionLevel); err != nil { + if _, err := gzip.NewWriterLevel(io.Discard, c.Producer.CompressionLevel); err != nil { return ConfigurationError(fmt.Sprintf("gzip compression does not work with level %d: %v", c.Producer.CompressionLevel, err)) } } diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go index f9cd172b47..2d76e8a1e8 100644 --- a/vendor/github.com/Shopify/sarama/consumer.go +++ b/vendor/github.com/Shopify/sarama/consumer.go @@ -373,7 +373,7 @@ func (child *partitionConsumer) preferredBroker() (*Broker, error) { } } - // if prefered replica cannot be found fallback to leader + // if preferred replica cannot be found fallback to leader return child.consumer.client.Leader(child.topic, child.partition) } @@ -845,7 +845,7 @@ func (bc *brokerConsumer) handleResponses() { if result == nil { if preferredBroker, err := child.preferredBroker(); err == nil { if bc.broker.ID() != preferredBroker.ID() { - // not an error but needs redispatching to consume from prefered replica + // not an error but needs redispatching to consume from preferred replica child.trigger <- none{} delete(bc.subscriptions, child) } diff --git a/vendor/github.com/Shopify/sarama/consumer_group.go b/vendor/github.com/Shopify/sarama/consumer_group.go index 2bf236ae53..b603d1705a 100644 --- a/vendor/github.com/Shopify/sarama/consumer_group.go +++ b/vendor/github.com/Shopify/sarama/consumer_group.go @@ -7,6 +7,8 @@ import ( "sort" "sync" "time" + + "github.com/rcrowley/go-metrics" ) // ErrClosedConsumerGroup is the error returned when a method is called on a consumer group that has been closed. @@ -28,7 +30,7 @@ type ConsumerGroup interface { // in a separate goroutine which requires it to be thread-safe. Any state must be carefully protected // from concurrent reads/writes. // 4. 
The session will persist until one of the ConsumeClaim() functions exits. This can be either when the - // parent context is cancelled or when a server-side rebalance cycle is initiated. + // parent context is canceled or when a server-side rebalance cycle is initiated. // 5. Once all the ConsumeClaim() loops have exited, the handler's Cleanup() hook is called // to allow the user to perform any final tasks before a rebalance. // 6. Finally, marked offsets are committed one last time before claims are released. @@ -112,6 +114,7 @@ func newConsumerGroup(groupID string, client Client) (ConsumerGroup, error) { groupID: groupID, errors: make(chan error, config.ChannelBufferSize), closed: make(chan none), + userData: config.Consumer.Group.Member.UserData, }, nil } @@ -212,12 +215,38 @@ func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler return c.retryNewSession(ctx, topics, handler, retries, true) } + var ( + metricRegistry = c.config.MetricRegistry + consumerGroupJoinTotal metrics.Counter + consumerGroupJoinFailed metrics.Counter + consumerGroupSyncTotal metrics.Counter + consumerGroupSyncFailed metrics.Counter + ) + + if metricRegistry != nil { + consumerGroupJoinTotal = metrics.GetOrRegisterCounter(fmt.Sprintf("consumer-group-join-total-%s", c.groupID), metricRegistry) + consumerGroupJoinFailed = metrics.GetOrRegisterCounter(fmt.Sprintf("consumer-group-join-failed-%s", c.groupID), metricRegistry) + consumerGroupSyncTotal = metrics.GetOrRegisterCounter(fmt.Sprintf("consumer-group-sync-total-%s", c.groupID), metricRegistry) + consumerGroupSyncFailed = metrics.GetOrRegisterCounter(fmt.Sprintf("consumer-group-sync-failed-%s", c.groupID), metricRegistry) + } + // Join consumer group join, err := c.joinGroupRequest(coordinator, topics) + if consumerGroupJoinTotal != nil { + consumerGroupJoinTotal.Inc(1) + } if err != nil { _ = coordinator.Close() + if consumerGroupJoinFailed != nil { + consumerGroupJoinFailed.Inc(1) + } return nil, err } + if join.Err != ErrNoError { + if consumerGroupJoinFailed != nil { + consumerGroupJoinFailed.Inc(1) + } + } switch join.Err { case ErrNoError: c.memberID = join.MemberId @@ -256,10 +285,22 @@ func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler // Sync consumer group groupRequest, err := c.syncGroupRequest(coordinator, plan, join.GenerationId) + if consumerGroupSyncTotal != nil { + consumerGroupSyncTotal.Inc(1) + } if err != nil { _ = coordinator.Close() + if consumerGroupSyncFailed != nil { + consumerGroupSyncFailed.Inc(1) + } return nil, err } + if groupRequest.Err != ErrNoError { + if consumerGroupSyncFailed != nil { + consumerGroupSyncFailed.Inc(1) + } + } + switch groupRequest.Err { case ErrNoError: case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately @@ -289,7 +330,15 @@ func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler return nil, err } claims = members.Topics - c.userData = members.UserData + + // in the case of stateful balance strategies, hold on to the returned + // assignment metadata, otherwise, reset the statically defined conusmer + // group metadata + if members.UserData != nil { + c.userData = members.UserData + } else { + c.userData = c.config.Consumer.Group.Member.UserData + } for _, partitions := range claims { sort.Sort(int32Slice(partitions)) @@ -311,14 +360,9 @@ func (c *consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) ( req.RebalanceTimeout = int32(c.config.Consumer.Group.Rebalance.Timeout / 
time.Millisecond) } - // use static user-data if configured, otherwise use consumer-group userdata from the last sync - userData := c.config.Consumer.Group.Member.UserData - if len(userData) == 0 { - userData = c.userData - } meta := &ConsumerGroupMemberMetadata{ Topics: topics, - UserData: userData, + UserData: c.userData, } strategy := c.config.Consumer.Group.Rebalance.Strategy if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil { diff --git a/vendor/github.com/Shopify/sarama/decompress.go b/vendor/github.com/Shopify/sarama/decompress.go index af45fdaf94..5565e36cfa 100644 --- a/vendor/github.com/Shopify/sarama/decompress.go +++ b/vendor/github.com/Shopify/sarama/decompress.go @@ -4,7 +4,7 @@ import ( "bytes" "compress/gzip" "fmt" - "io/ioutil" + "io" "sync" snappy "github.com/eapache/go-xerial-snappy" @@ -40,7 +40,7 @@ func decompress(cc CompressionCodec, data []byte) ([]byte, error) { defer gzipReaderPool.Put(reader) - return ioutil.ReadAll(reader) + return io.ReadAll(reader) case CompressionSnappy: return snappy.Decode(data) case CompressionLZ4: @@ -52,7 +52,7 @@ func decompress(cc CompressionCodec, data []byte) ([]byte, error) { } defer lz4ReaderPool.Put(reader) - return ioutil.ReadAll(reader) + return io.ReadAll(reader) case CompressionZSTD: return zstdDecompress(nil, data) default: diff --git a/vendor/github.com/Shopify/sarama/delete_offsets_request.go b/vendor/github.com/Shopify/sarama/delete_offsets_request.go new file mode 100644 index 0000000000..339c7857ca --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_offsets_request.go @@ -0,0 +1,92 @@ +package sarama + +type DeleteOffsetsRequest struct { + Group string + partitions map[string][]int32 +} + +func (r *DeleteOffsetsRequest) encode(pe packetEncoder) (err error) { + err = pe.putString(r.Group) + if err != nil { + return err + } + + if r.partitions == nil { + pe.putInt32(0) + } else { + if err = pe.putArrayLength(len(r.partitions)); err != nil { + return err + } + } + for topic, partitions := range r.partitions { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putInt32Array(partitions) + if err != nil { + return err + } + } + return +} + +func (r *DeleteOffsetsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Group, err = pd.getString() + if err != nil { + return err + } + var partitionCount int + + partitionCount, err = pd.getArrayLength() + if err != nil { + return err + } + + if (partitionCount == 0 && version < 2) || partitionCount < 0 { + return nil + } + + r.partitions = make(map[string][]int32, partitionCount) + for i := 0; i < partitionCount; i++ { + var topic string + topic, err = pd.getString() + if err != nil { + return err + } + + var partitions []int32 + partitions, err = pd.getInt32Array() + if err != nil { + return err + } + + r.partitions[topic] = partitions + } + + return nil +} + +func (r *DeleteOffsetsRequest) key() int16 { + return 47 +} + +func (r *DeleteOffsetsRequest) version() int16 { + return 0 +} + +func (r *DeleteOffsetsRequest) headerVersion() int16 { + return 1 +} + +func (r *DeleteOffsetsRequest) requiredVersion() KafkaVersion { + return V2_4_0_0 +} + +func (r *DeleteOffsetsRequest) AddPartition(topic string, partitionID int32) { + if r.partitions == nil { + r.partitions = make(map[string][]int32) + } + + r.partitions[topic] = append(r.partitions[topic], partitionID) +} diff --git a/vendor/github.com/Shopify/sarama/delete_offsets_response.go b/vendor/github.com/Shopify/sarama/delete_offsets_response.go new file mode 100644 
index 0000000000..00c3eaf906 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_offsets_response.go @@ -0,0 +1,112 @@ +package sarama + +import ( + "time" +) + +type DeleteOffsetsResponse struct { + //The top-level error code, or 0 if there was no error. + ErrorCode KError + ThrottleTime time.Duration + //The responses for each partition of the topics. + Errors map[string]map[int32]KError +} + +func (r *DeleteOffsetsResponse) AddError(topic string, partition int32, errorCode KError) { + if r.Errors == nil { + r.Errors = make(map[string]map[int32]KError) + } + partitions := r.Errors[topic] + if partitions == nil { + partitions = make(map[int32]KError) + r.Errors[topic] = partitions + } + partitions[partition] = errorCode +} + +func (r *DeleteOffsetsResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.ErrorCode)) + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(r.Errors)); err != nil { + return err + } + for topic, partitions := range r.Errors { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(partitions)); err != nil { + return err + } + for partition, errorCode := range partitions { + pe.putInt32(partition) + pe.putInt16(int16(errorCode)) + } + } + return nil +} + +func (r *DeleteOffsetsResponse) decode(pd packetDecoder, version int16) error { + tmpErr, err := pd.getInt16() + if err != nil { + return err + } + r.ErrorCode = KError(tmpErr) + + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + numTopics, err := pd.getArrayLength() + if err != nil || numTopics == 0 { + return err + } + + r.Errors = make(map[string]map[int32]KError, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numErrors, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Errors[name] = make(map[int32]KError, numErrors) + + for j := 0; j < numErrors; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + tmp, err := pd.getInt16() + if err != nil { + return err + } + r.Errors[name][id] = KError(tmp) + } + } + + return nil +} + +func (r *DeleteOffsetsResponse) key() int16 { + return 47 +} + +func (r *DeleteOffsetsResponse) version() int16 { + return 0 +} + +func (r *DeleteOffsetsResponse) headerVersion() int16 { + return 0 +} + +func (r *DeleteOffsetsResponse) requiredVersion() KafkaVersion { + return V2_4_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/describe_client_quotas_request.go b/vendor/github.com/Shopify/sarama/describe_client_quotas_request.go new file mode 100644 index 0000000000..17a82051c5 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_client_quotas_request.go @@ -0,0 +1,141 @@ +package sarama + +// DescribeClientQuotas Request (Version: 0) => [components] strict +// components => entity_type match_type match +// entity_type => STRING +// match_type => INT8 +// match => NULLABLE_STRING +// strict => BOOLEAN + +// A filter to be applied to matching client quotas. +// Components: the components to filter on +// Strict: whether the filter only includes specified components +type DescribeClientQuotasRequest struct { + Components []QuotaFilterComponent + Strict bool +} + +// Describe a component for applying a client quota filter. 
+// EntityType: the entity type the filter component applies to ("user", "client-id", "ip") +// MatchType: the match type of the filter component (any, exact, default) +// Match: the name that's matched exactly (used when MatchType is QuotaMatchExact) +type QuotaFilterComponent struct { + EntityType QuotaEntityType + MatchType QuotaMatchType + Match string +} + +func (d *DescribeClientQuotasRequest) encode(pe packetEncoder) error { + // Components + if err := pe.putArrayLength(len(d.Components)); err != nil { + return err + } + for _, c := range d.Components { + if err := c.encode(pe); err != nil { + return err + } + } + + // Strict + pe.putBool(d.Strict) + + return nil +} + +func (d *DescribeClientQuotasRequest) decode(pd packetDecoder, version int16) error { + // Components + componentCount, err := pd.getArrayLength() + if err != nil { + return err + } + if componentCount > 0 { + d.Components = make([]QuotaFilterComponent, componentCount) + for i := range d.Components { + c := QuotaFilterComponent{} + if err = c.decode(pd, version); err != nil { + return err + } + d.Components[i] = c + } + } else { + d.Components = []QuotaFilterComponent{} + } + + // Strict + strict, err := pd.getBool() + if err != nil { + return err + } + d.Strict = strict + + return nil +} + +func (d *QuotaFilterComponent) encode(pe packetEncoder) error { + // EntityType + if err := pe.putString(string(d.EntityType)); err != nil { + return err + } + + // MatchType + pe.putInt8(int8(d.MatchType)) + + // Match + if d.MatchType == QuotaMatchAny { + if err := pe.putNullableString(nil); err != nil { + return err + } + } else if d.MatchType == QuotaMatchDefault { + if err := pe.putString(""); err != nil { + return err + } + } else { + if err := pe.putString(d.Match); err != nil { + return err + } + } + + return nil +} + +func (d *QuotaFilterComponent) decode(pd packetDecoder, version int16) error { + // EntityType + entityType, err := pd.getString() + if err != nil { + return err + } + d.EntityType = QuotaEntityType(entityType) + + // MatchType + matchType, err := pd.getInt8() + if err != nil { + return err + } + d.MatchType = QuotaMatchType(matchType) + + // Match + match, err := pd.getNullableString() + if err != nil { + return err + } + if match != nil { + d.Match = *match + } + return nil +} + +func (d *DescribeClientQuotasRequest) key() int16 { + return 48 +} + +func (d *DescribeClientQuotasRequest) version() int16 { + return 0 +} + +func (d *DescribeClientQuotasRequest) headerVersion() int16 { + return 1 +} + +func (d *DescribeClientQuotasRequest) requiredVersion() KafkaVersion { + return V2_6_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/describe_client_quotas_response.go b/vendor/github.com/Shopify/sarama/describe_client_quotas_response.go new file mode 100644 index 0000000000..555da0c485 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_client_quotas_response.go @@ -0,0 +1,235 @@ +package sarama + +import ( + "time" +) + +// DescribeClientQuotas Response (Version: 0) => throttle_time_ms error_code error_message [entries] +// throttle_time_ms => INT32 +// error_code => INT16 +// error_message => NULLABLE_STRING +// entries => [entity] [values] +// entity => entity_type entity_name +// entity_type => STRING +// entity_name => NULLABLE_STRING +// values => key value +// key => STRING +// value => FLOAT64 + +type DescribeClientQuotasResponse struct { + ThrottleTime time.Duration // The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not 
violate any quota. + ErrorCode KError // The error code, or `0` if the quota description succeeded. + ErrorMsg *string // The error message, or `null` if the quota description succeeded. + Entries []DescribeClientQuotasEntry // A result entry. +} + +type DescribeClientQuotasEntry struct { + Entity []QuotaEntityComponent // The quota entity description. + Values map[string]float64 // The quota values for the entity. +} + +type QuotaEntityComponent struct { + EntityType QuotaEntityType + MatchType QuotaMatchType + Name string +} + +func (d *DescribeClientQuotasResponse) encode(pe packetEncoder) error { + // ThrottleTime + pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) + + // ErrorCode + pe.putInt16(int16(d.ErrorCode)) + + // ErrorMsg + if err := pe.putNullableString(d.ErrorMsg); err != nil { + return err + } + + // Entries + if err := pe.putArrayLength(len(d.Entries)); err != nil { + return err + } + for _, e := range d.Entries { + if err := e.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (d *DescribeClientQuotasResponse) decode(pd packetDecoder, version int16) error { + // ThrottleTime + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + // ErrorCode + errCode, err := pd.getInt16() + if err != nil { + return err + } + d.ErrorCode = KError(errCode) + + // ErrorMsg + errMsg, err := pd.getNullableString() + if err != nil { + return err + } + d.ErrorMsg = errMsg + + // Entries + entryCount, err := pd.getArrayLength() + if err != nil { + return err + } + if entryCount > 0 { + d.Entries = make([]DescribeClientQuotasEntry, entryCount) + for i := range d.Entries { + e := DescribeClientQuotasEntry{} + if err = e.decode(pd, version); err != nil { + return err + } + d.Entries[i] = e + } + } else { + d.Entries = []DescribeClientQuotasEntry{} + } + + return nil +} + +func (d *DescribeClientQuotasEntry) encode(pe packetEncoder) error { + // Entity + if err := pe.putArrayLength(len(d.Entity)); err != nil { + return err + } + for _, e := range d.Entity { + if err := e.encode(pe); err != nil { + return err + } + } + + // Values + if err := pe.putArrayLength(len(d.Values)); err != nil { + return err + } + for key, value := range d.Values { + // key + if err := pe.putString(key); err != nil { + return err + } + // value + pe.putFloat64(value) + } + + return nil +} + +func (d *DescribeClientQuotasEntry) decode(pd packetDecoder, version int16) error { + // Entity + componentCount, err := pd.getArrayLength() + if err != nil { + return err + } + if componentCount > 0 { + d.Entity = make([]QuotaEntityComponent, componentCount) + for i := 0; i < componentCount; i++ { + component := QuotaEntityComponent{} + if err := component.decode(pd, version); err != nil { + return err + } + d.Entity[i] = component + } + } else { + d.Entity = []QuotaEntityComponent{} + } + + // Values + valueCount, err := pd.getArrayLength() + if err != nil { + return err + } + if valueCount > 0 { + d.Values = make(map[string]float64, valueCount) + for i := 0; i < valueCount; i++ { + // key + key, err := pd.getString() + if err != nil { + return err + } + // value + value, err := pd.getFloat64() + if err != nil { + return err + } + d.Values[key] = value + } + } else { + d.Values = map[string]float64{} + } + + return nil +} + +func (c *QuotaEntityComponent) encode(pe packetEncoder) error { + // entity_type + if err := pe.putString(string(c.EntityType)); err != nil { + return err + } + // entity_name + if c.MatchType == 
QuotaMatchDefault { + if err := pe.putNullableString(nil); err != nil { + return err + } + } else { + if err := pe.putString(c.Name); err != nil { + return err + } + } + + return nil +} + +func (c *QuotaEntityComponent) decode(pd packetDecoder, version int16) error { + // entity_type + entityType, err := pd.getString() + if err != nil { + return err + } + c.EntityType = QuotaEntityType(entityType) + + // entity_name + entityName, err := pd.getNullableString() + if err != nil { + return err + } + + if entityName == nil { + c.MatchType = QuotaMatchDefault + } else { + c.MatchType = QuotaMatchExact + c.Name = *entityName + } + + return nil +} + +func (d *DescribeClientQuotasResponse) key() int16 { + return 48 +} + +func (d *DescribeClientQuotasResponse) version() int16 { + return 0 +} + +func (d *DescribeClientQuotasResponse) headerVersion() int16 { + return 0 +} + +func (d *DescribeClientQuotasResponse) requiredVersion() KafkaVersion { + return V2_6_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/describe_configs_response.go b/vendor/github.com/Shopify/sarama/describe_configs_response.go index 928f5a52ab..4968f4854a 100644 --- a/vendor/github.com/Shopify/sarama/describe_configs_response.go +++ b/vendor/github.com/Shopify/sarama/describe_configs_response.go @@ -308,19 +308,19 @@ func (c *ConfigSynonym) encode(pe packetEncoder, version int16) (err error) { func (c *ConfigSynonym) decode(pd packetDecoder, version int16) error { name, err := pd.getString() if err != nil { - return nil + return err } c.ConfigName = name value, err := pd.getString() if err != nil { - return nil + return err } c.ConfigValue = value source, err := pd.getInt8() if err != nil { - return nil + return err } c.Source = ConfigSource(source) return nil diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml index 7bf9ff9184..05448a884f 100644 --- a/vendor/github.com/Shopify/sarama/dev.yml +++ b/vendor/github.com/Shopify/sarama/dev.yml @@ -2,7 +2,7 @@ name: sarama up: - go: - version: '1.16' + version: '1.17.1' commands: test: diff --git a/vendor/github.com/Shopify/sarama/docker-compose.yml b/vendor/github.com/Shopify/sarama/docker-compose.yml index 8e9c24e3db..744ed3cf87 100644 --- a/vendor/github.com/Shopify/sarama/docker-compose.yml +++ b/vendor/github.com/Shopify/sarama/docker-compose.yml @@ -1,7 +1,7 @@ version: '3.7' services: zookeeper-1: - image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.1.1}' + image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.2.0}' restart: always environment: ZOOKEEPER_SERVER_ID: '1' @@ -13,7 +13,7 @@ services: ZOOKEEPER_SYNC_LIMIT: '5' ZOOKEEPER_MAX_CLIENT_CONNS: '0' zookeeper-2: - image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.1.1}' + image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.2.0}' restart: always environment: ZOOKEEPER_SERVER_ID: '2' @@ -25,7 +25,7 @@ services: ZOOKEEPER_SYNC_LIMIT: '5' ZOOKEEPER_MAX_CLIENT_CONNS: '0' zookeeper-3: - image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.1.1}' + image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.2.0}' restart: always environment: ZOOKEEPER_SERVER_ID: '3' @@ -37,7 +37,7 @@ services: ZOOKEEPER_SYNC_LIMIT: '5' ZOOKEEPER_MAX_CLIENT_CONNS: '0' kafka-1: - image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}' + image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.2.0}' restart: always environment: KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' @@ -54,7 
+54,7 @@ services: KAFKA_DELETE_TOPIC_ENABLE: 'true' KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' kafka-2: - image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}' + image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.2.0}' restart: always environment: KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' @@ -71,7 +71,7 @@ services: KAFKA_DELETE_TOPIC_ENABLE: 'true' KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' kafka-3: - image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}' + image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.2.0}' restart: always environment: KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' @@ -88,7 +88,7 @@ services: KAFKA_DELETE_TOPIC_ENABLE: 'true' KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' kafka-4: - image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}' + image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.2.0}' restart: always environment: KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' @@ -105,7 +105,7 @@ services: KAFKA_DELETE_TOPIC_ENABLE: 'true' KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' kafka-5: - image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}' + image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.2.0}' restart: always environment: KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' @@ -122,7 +122,7 @@ services: KAFKA_DELETE_TOPIC_ENABLE: 'true' KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' toxiproxy: - image: 'shopify/toxiproxy:2.1.4' + image: 'ghcr.io/shopify/toxiproxy:2.1.5' ports: # The tests themselves actually start the proxies on these ports - '29091:29091' diff --git a/vendor/github.com/Shopify/sarama/go.mod b/vendor/github.com/Shopify/sarama/go.mod index ccbd8e2d13..d1fdd2e4c4 100644 --- a/vendor/github.com/Shopify/sarama/go.mod +++ b/vendor/github.com/Shopify/sarama/go.mod @@ -3,25 +3,25 @@ module github.com/Shopify/sarama go 1.13 require ( - github.com/Shopify/toxiproxy v2.1.4+incompatible + github.com/Shopify/toxiproxy/v2 v2.1.6-0.20210914104332-15ea381dcdae github.com/davecgh/go-spew v1.1.1 github.com/eapache/go-resiliency v1.2.0 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 github.com/eapache/queue v1.1.0 github.com/fortytw2/leaktest v1.3.0 github.com/frankban/quicktest v1.11.3 // indirect + github.com/golang/snappy v0.0.4 // indirect github.com/jcmturner/gofork v1.0.0 github.com/jcmturner/gokrb5/v8 v8.4.2 - github.com/klauspost/compress v1.12.2 + github.com/klauspost/compress v1.13.6 github.com/kr/text v0.2.0 // indirect - github.com/pierrec/lz4 v2.6.0+incompatible + github.com/pierrec/lz4 v2.6.1+incompatible github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 github.com/stretchr/testify v1.7.0 - github.com/xdg/scram v1.0.3 - github.com/xdg/stringprep v1.0.3 // indirect - golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e // indirect - golang.org/x/net v0.0.0-20210614182718-04defd469f4e - golang.org/x/text v0.3.6 // indirect + github.com/xdg-go/scram v1.0.2 + golang.org/x/crypto v0.0.0-20210920023735-84f357641f63 // indirect + golang.org/x/net v0.0.0-20210917221730-978cfadd31cf + golang.org/x/text v0.3.7 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect diff --git a/vendor/github.com/Shopify/sarama/go.sum b/vendor/github.com/Shopify/sarama/go.sum index 
a49776049d..e91c009a81 100644 --- a/vendor/github.com/Shopify/sarama/go.sum +++ b/vendor/github.com/Shopify/sarama/go.sum @@ -1,5 +1,7 @@ -github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Shopify/toxiproxy/v2 v2.1.6-0.20210914104332-15ea381dcdae h1:ePgznFqEG1v3AjMklnK8H7BSc++FDSo7xfK9K7Af+0Y= +github.com/Shopify/toxiproxy/v2 v2.1.6-0.20210914104332-15ea381dcdae/go.mod h1:/cvHQkZ1fst0EmZnA5dFtiQdWCNCFYzb+uE2vqVgvx0= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -14,10 +16,11 @@ github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8 github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= @@ -36,50 +39,58 @@ github.com/jcmturner/gokrb5/v8 v8.4.2 h1:6ZIM6b/JJN0X8UM43ZOM6Z4SJzla+a/u7scXFJz github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= -github.com/klauspost/compress v1.12.2 h1:2KCfW3I9M7nSc5wOqXAlW2v2U6v+w6cbjvbfp+OykW8= -github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/pierrec/lz4 v2.6.0+incompatible 
h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A= -github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= +github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/xdg/scram v1.0.3 h1:nTadYh2Fs4BK2xdldEa2g5bbaZp0/+1nJMMPtPxS/to= -github.com/xdg/scram v1.0.3/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= -github.com/xdg/stringprep v1.0.3 h1:cmL5Enob4W83ti/ZHuZLuKD/xqJfus4fVPwE+/BDm+4= -github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= +github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.0.2 h1:akYIkZ28e6A96dkWNJQu3nmCzH3YfwMPQExUYDaRv7w= +github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= +github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc= +github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210920023735-84f357641f63 h1:kETrAMYZq6WVGPa8IIixL0CaEcIUNi+1WX7grUoi3y8= +golang.org/x/crypto v0.0.0-20210920023735-84f357641f63/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210427231257-85d9c07bbe3a 
h1:njMmldwFTyDLqonHMagNXKBWptTBeDZOdblgaDsNEGQ= -golang.org/x/net v0.0.0-20210427231257-85d9c07bbe3a/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210917221730-978cfadd31cf h1:R150MpwJIv1MpS0N/pc+NhTM8ajzvlmxlY5OYsrevXQ= +golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= @@ -87,7 +98,9 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go index c2654d12ed..9166f6efbc 100644 --- a/vendor/github.com/Shopify/sarama/mockbroker.go +++ b/vendor/github.com/Shopify/sarama/mockbroker.go @@ -30,9 +30,9 @@ type RequestNotifierFunc func(bytesRead, bytesWritten int) // to facilitate testing of 
higher level or specialized consumers and producers // built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol, // but rather provides a facility to do that. It takes care of the TCP -// transport, request unmarshalling, response marshalling, and makes it the test +// transport, request unmarshalling, response marshaling, and makes it the test // writer responsibility to program correct according to the Kafka API protocol -// MockBroker behaviour. +// MockBroker behavior. // // MockBroker is implemented as a TCP server listening on a kernel-selected // localhost port that can accept many connections. It reads Kafka requests @@ -83,9 +83,13 @@ func (b *MockBroker) SetLatency(latency time.Duration) { // and uses the found MockResponse instance to generate an appropriate reply. // If the request type is not found in the map then nothing is sent. func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) { + fnMap := make(map[string]MockResponse) + for k, v := range handlerMap { + fnMap[k] = v + } b.setHandler(func(req *request) (res encoderWithHeader) { reqTypeName := reflect.TypeOf(req.body).Elem().Name() - mockResponse := handlerMap[reqTypeName] + mockResponse := fnMap[reqTypeName] if mockResponse == nil { return nil } diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go index 6654ed07c3..a5e94fb39f 100644 --- a/vendor/github.com/Shopify/sarama/mockresponses.go +++ b/vendor/github.com/Shopify/sarama/mockresponses.go @@ -1082,6 +1082,35 @@ func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoderWithHead return resp } +type MockDeleteOffsetResponse struct { + errorCode KError + topic string + partition int32 + errorPartition KError +} + +func NewMockDeleteOffsetRequest(t TestReporter) *MockDeleteOffsetResponse { + return &MockDeleteOffsetResponse{} +} + +func (m *MockDeleteOffsetResponse) SetDeletedOffset(errorCode KError, topic string, partition int32, errorPartition KError) *MockDeleteOffsetResponse { + m.errorCode = errorCode + m.topic = topic + m.partition = partition + m.errorPartition = errorPartition + return m +} + +func (m *MockDeleteOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader { + resp := &DeleteOffsetsResponse{ + ErrorCode: m.errorCode, + Errors: map[string]map[int32]KError{ + m.topic: {m.partition: m.errorPartition}, + }, + } + return resp +} + type MockJoinGroupResponse struct { t TestReporter @@ -1271,3 +1300,33 @@ func (m *MockDescribeLogDirsResponse) For(reqBody versionedDecoder) encoderWithH } return resp } + +type MockApiVersionsResponse struct { + t TestReporter +} + +func NewMockApiVersionsResponse(t TestReporter) *MockApiVersionsResponse { + return &MockApiVersionsResponse{t: t} +} + +func (mr *MockApiVersionsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*ApiVersionsRequest) + res := &ApiVersionsResponse{ + Version: req.Version, + ApiKeys: []ApiVersionsResponseKey{ + { + Version: req.Version, + ApiKey: 0, + MinVersion: 5, + MaxVersion: 8, + }, + { + Version: req.Version, + ApiKey: 1, + MinVersion: 7, + MaxVersion: 11, + }, + }, + } + return res +} diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go index 184bc26ae9..08b4332234 100644 --- a/vendor/github.com/Shopify/sarama/packet_decoder.go +++ b/vendor/github.com/Shopify/sarama/packet_decoder.go @@ -11,6 +11,7 @@ type packetDecoder interface { getInt64() (int64, error) getVarint() (int64, error) 
getUVarint() (uint64, error) + getFloat64() (float64, error) getArrayLength() (int, error) getCompactArrayLength() (int, error) getBool() (bool, error) diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/Shopify/sarama/packet_encoder.go index aea53ca83b..5016e09a6c 100644 --- a/vendor/github.com/Shopify/sarama/packet_encoder.go +++ b/vendor/github.com/Shopify/sarama/packet_encoder.go @@ -13,6 +13,7 @@ type packetEncoder interface { putInt64(in int64) putVarint(in int64) putUVarint(in uint64) + putFloat64(in float64) putCompactArrayLength(in int) putArrayLength(in int) error putBool(in bool) diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/Shopify/sarama/partitioner.go index a66e11ea3f..57377760a7 100644 --- a/vendor/github.com/Shopify/sarama/partitioner.go +++ b/vendor/github.com/Shopify/sarama/partitioner.go @@ -61,9 +61,9 @@ func WithCustomHashFunction(hasher func() hash.Hash32) HashPartitionerOption { } // WithCustomFallbackPartitioner lets you specify what HashPartitioner should be used in case a Distribution Key is empty -func WithCustomFallbackPartitioner(randomHP *hashPartitioner) HashPartitionerOption { +func WithCustomFallbackPartitioner(randomHP Partitioner) HashPartitionerOption { return func(hp *hashPartitioner) { - hp.random = hp + hp.random = randomHP } } @@ -169,7 +169,7 @@ func NewHashPartitioner(topic string) Partitioner { // NewReferenceHashPartitioner is like NewHashPartitioner except that it handles absolute values // in the same way as the reference Java implementation. NewHashPartitioner was supposed to do -// that but it had a mistake and now there are people depending on both behaviours. This will +// that but it had a mistake and now there are people depending on both behaviors. This will // all go away on the next major version bump. 
func NewReferenceHashPartitioner(topic string) Partitioner { p := new(hashPartitioner) diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/Shopify/sarama/prep_encoder.go index 0d01374879..1602fcb3f6 100644 --- a/vendor/github.com/Shopify/sarama/prep_encoder.go +++ b/vendor/github.com/Shopify/sarama/prep_encoder.go @@ -42,6 +42,10 @@ func (pe *prepEncoder) putUVarint(in uint64) { pe.length += binary.PutUvarint(buf[:], in) } +func (pe *prepEncoder) putFloat64(in float64) { + pe.length += 8 +} + func (pe *prepEncoder) putArrayLength(in int) error { if in > math.MaxInt32 { return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)} diff --git a/vendor/github.com/Shopify/sarama/quota_types.go b/vendor/github.com/Shopify/sarama/quota_types.go new file mode 100644 index 0000000000..4f33af0bca --- /dev/null +++ b/vendor/github.com/Shopify/sarama/quota_types.go @@ -0,0 +1,21 @@ +package sarama + +type ( + QuotaEntityType string + + QuotaMatchType int +) + +// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/quota/ClientQuotaEntity.java +const ( + QuotaEntityUser QuotaEntityType = "user" + QuotaEntityClientID QuotaEntityType = "client-id" + QuotaEntityIP QuotaEntityType = "ip" +) + +// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/requests/DescribeClientQuotasRequest.java +const ( + QuotaMatchExact QuotaMatchType = iota + QuotaMatchDefault + QuotaMatchAny +) diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go index 2482c63776..bab6cb486f 100644 --- a/vendor/github.com/Shopify/sarama/real_decoder.go +++ b/vendor/github.com/Shopify/sarama/real_decoder.go @@ -93,6 +93,16 @@ func (rd *realDecoder) getUVarint() (uint64, error) { return tmp, nil } +func (rd *realDecoder) getFloat64() (float64, error) { + if rd.remaining() < 8 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := math.Float64frombits(binary.BigEndian.Uint64(rd.raw[rd.off:])) + rd.off += 8 + return tmp, nil +} + func (rd *realDecoder) getArrayLength() (int, error) { if rd.remaining() < 4 { rd.off = len(rd.raw) @@ -230,7 +240,9 @@ func (rd *realDecoder) getCompactString() (string, error) { } length := int(n - 1) - + if length < 0 { + return "", errInvalidByteSliceLength + } tmpStr := string(rd.raw[rd.off : rd.off+length]) rd.off += length return tmpStr, nil diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/Shopify/sarama/real_encoder.go index c07204cbc7..d6a0ddf124 100644 --- a/vendor/github.com/Shopify/sarama/real_encoder.go +++ b/vendor/github.com/Shopify/sarama/real_encoder.go @@ -3,6 +3,7 @@ package sarama import ( "encoding/binary" "errors" + "math" "github.com/rcrowley/go-metrics" ) @@ -44,6 +45,11 @@ func (re *realEncoder) putUVarint(in uint64) { re.off += binary.PutUvarint(re.raw[re.off:], in) } +func (re *realEncoder) putFloat64(in float64) { + binary.BigEndian.PutUint64(re.raw[re.off:], math.Float64bits(in)) + re.off += 8 +} + func (re *realEncoder) putArrayLength(in int) error { re.putInt32(int32(in)) return nil diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go index d899df5346..ce90eb8c46 100644 --- a/vendor/github.com/Shopify/sarama/request.go +++ b/vendor/github.com/Shopify/sarama/request.go @@ -147,7 +147,7 @@ func allocateBody(key, version int16) protocolBody { case 17: return &SaslHandshakeRequest{} case 18: - return 
&ApiVersionsRequest{} + return &ApiVersionsRequest{Version: version} case 19: return &CreateTopicsRequest{} case 20: @@ -188,6 +188,12 @@ func allocateBody(key, version int16) protocolBody { return &AlterPartitionReassignmentsRequest{} case 46: return &ListPartitionReassignmentsRequest{} + case 47: + return &DeleteOffsetsRequest{} + case 48: + return &DescribeClientQuotasRequest{} + case 49: + return &AlterClientQuotasRequest{} case 50: return &DescribeUserScramCredentialsRequest{} case 51: diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go index 48f362d287..f6e55a395d 100644 --- a/vendor/github.com/Shopify/sarama/sarama.go +++ b/vendor/github.com/Shopify/sarama/sarama.go @@ -68,13 +68,17 @@ Consumer related metrics: | Name | Type | Description | +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ | consumer-batch-size | histogram | Distribution of the number of messages in a batch | + | consumer-group-join-total- | counter | Total count of consumer group join attempts | + | consumer-group-join-failed- | counter | Total count of consumer group join failures | + | consumer-group-sync-total- | counter | Total count of consumer group sync attempts | + | consumer-group-sync-failed- | counter | Total count of consumer group sync failures | +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ */ package sarama import ( - "io/ioutil" + "io" "log" ) @@ -82,7 +86,7 @@ var ( // Logger is the instance of a StdLogger interface that Sarama writes connection // management events to. By default it is set to discard all log messages via ioutil.Discard, // but you can set it to redirect wherever you want. - Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags) + Logger StdLogger = log.New(io.Discard, "[Sarama] ", log.LstdFlags) // PanicHandler is called for recovering from panics spawned internally to the library (and thus // not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered. @@ -108,3 +112,21 @@ type StdLogger interface { Printf(format string, v ...interface{}) Println(v ...interface{}) } + +type debugLogger struct{} + +func (d *debugLogger) Print(v ...interface{}) { + Logger.Print(v) +} +func (d *debugLogger) Printf(format string, v ...interface{}) { + Logger.Printf(format, v) +} +func (d *debugLogger) Println(v ...interface{}) { + Logger.Println(v) +} + +// DebugLogger is the instance of a StdLogger that Sarama writes more verbose +// debug information to. By default it is set to redirect all debug to the +// default Logger above, but you can optionally set it to another StdLogger +// instance to (e.g.,) discard debug information +var DebugLogger StdLogger = &debugLogger{} diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/Shopify/sarama/sync_producer.go index 021c5a0103..eedece6b4a 100644 --- a/vendor/github.com/Shopify/sarama/sync_producer.go +++ b/vendor/github.com/Shopify/sarama/sync_producer.go @@ -25,10 +25,9 @@ type SyncProducer interface { // SendMessages will return an error. SendMessages(msgs []*ProducerMessage) error - // Close shuts down the producer and waits for any buffered messages to be - // flushed. You must call this function before a producer object passes out of - // scope, as it may otherwise leak memory. 
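For the `sarama.go` hunk above: `DebugLogger` defaults to forwarding its more verbose output to `Logger`, so existing applications see no behavioral change, but either stream can be redirected independently. A short sketch, assuming an application that wants connection-management logs on stderr and no debug chatter:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/Shopify/sarama"
)

func main() {
	// Send normal connection-management logs to stderr...
	sarama.Logger = log.New(os.Stderr, "[sarama] ", log.LstdFlags)
	// ...and silence the more verbose debug stream added in this change.
	sarama.DebugLogger = log.New(io.Discard, "", 0)
}
```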
You must call this before calling - // Close on the underlying client. + // Close shuts down the producer; you must call this function before a producer + // object passes out of scope, as it may otherwise leak memory. + // You must call this before calling Close on the underlying client. Close() error } @@ -94,8 +93,8 @@ func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offs msg.expectation = expectation sp.producer.Input() <- msg - if err := <-expectation; err != nil { - return -1, -1, err.Err + if pErr := <-expectation; pErr != nil { + return -1, -1, pErr.Err } return msg.Partition, msg.Offset, nil @@ -115,8 +114,8 @@ func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error { var errors ProducerErrors for expectation := range expectations { - if err := <-expectation; err != nil { - errors = append(errors, err) + if pErr := <-expectation; pErr != nil { + errors = append(errors, pErr) } } diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go index 1859d29c21..4ff9730334 100644 --- a/vendor/github.com/Shopify/sarama/utils.go +++ b/vendor/github.com/Shopify/sarama/utils.go @@ -148,22 +148,36 @@ var ( V0_10_1_1 = newKafkaVersion(0, 10, 1, 1) V0_10_2_0 = newKafkaVersion(0, 10, 2, 0) V0_10_2_1 = newKafkaVersion(0, 10, 2, 1) + V0_10_2_2 = newKafkaVersion(0, 10, 2, 2) V0_11_0_0 = newKafkaVersion(0, 11, 0, 0) V0_11_0_1 = newKafkaVersion(0, 11, 0, 1) V0_11_0_2 = newKafkaVersion(0, 11, 0, 2) V1_0_0_0 = newKafkaVersion(1, 0, 0, 0) + V1_0_1_0 = newKafkaVersion(1, 0, 1, 0) + V1_0_2_0 = newKafkaVersion(1, 0, 2, 0) V1_1_0_0 = newKafkaVersion(1, 1, 0, 0) V1_1_1_0 = newKafkaVersion(1, 1, 1, 0) V2_0_0_0 = newKafkaVersion(2, 0, 0, 0) V2_0_1_0 = newKafkaVersion(2, 0, 1, 0) V2_1_0_0 = newKafkaVersion(2, 1, 0, 0) + V2_1_1_0 = newKafkaVersion(2, 1, 1, 0) V2_2_0_0 = newKafkaVersion(2, 2, 0, 0) + V2_2_1_0 = newKafkaVersion(2, 2, 1, 0) + V2_2_2_0 = newKafkaVersion(2, 2, 2, 0) V2_3_0_0 = newKafkaVersion(2, 3, 0, 0) + V2_3_1_0 = newKafkaVersion(2, 3, 1, 0) V2_4_0_0 = newKafkaVersion(2, 4, 0, 0) + V2_4_1_0 = newKafkaVersion(2, 4, 1, 0) V2_5_0_0 = newKafkaVersion(2, 5, 0, 0) + V2_5_1_0 = newKafkaVersion(2, 5, 1, 0) V2_6_0_0 = newKafkaVersion(2, 6, 0, 0) + V2_6_1_0 = newKafkaVersion(2, 6, 1, 0) + V2_6_2_0 = newKafkaVersion(2, 6, 2, 0) V2_7_0_0 = newKafkaVersion(2, 7, 0, 0) + V2_7_1_0 = newKafkaVersion(2, 7, 1, 0) V2_8_0_0 = newKafkaVersion(2, 8, 0, 0) + V2_8_1_0 = newKafkaVersion(2, 8, 1, 0) + V3_0_0_0 = newKafkaVersion(3, 0, 0, 0) SupportedVersions = []KafkaVersion{ V0_8_2_0, @@ -177,25 +191,39 @@ var ( V0_10_1_1, V0_10_2_0, V0_10_2_1, + V0_10_2_2, V0_11_0_0, V0_11_0_1, V0_11_0_2, V1_0_0_0, + V1_0_1_0, + V1_0_2_0, V1_1_0_0, V1_1_1_0, V2_0_0_0, V2_0_1_0, V2_1_0_0, + V2_1_1_0, V2_2_0_0, + V2_2_1_0, + V2_2_2_0, V2_3_0_0, + V2_3_1_0, V2_4_0_0, + V2_4_1_0, V2_5_0_0, + V2_5_1_0, V2_6_0_0, + V2_6_1_0, + V2_6_2_0, V2_7_0_0, + V2_7_1_0, V2_8_0_0, + V2_8_1_0, + V3_0_0_0, } MinVersion = V0_8_2_0 - MaxVersion = V2_8_0_0 + MaxVersion = V3_0_0_0 DefaultVersion = V1_0_0_0 ) diff --git a/vendor/github.com/Shopify/sarama/version.go b/vendor/github.com/Shopify/sarama/version.go new file mode 100644 index 0000000000..590329ef84 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/version.go @@ -0,0 +1,20 @@ +package sarama + +import "runtime/debug" + +var v string + +func version() string { + if v == "" { + bi, ok := debug.ReadBuildInfo() + if ok { + v = bi.Main.Version + } else { + // if we can't read a go module version then they're using a git + // 
clone or vendored module so all we can do is report "dev" for + // the version + v = "dev" + } + } + return v +} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go index 0668a66cf7..be2b343606 100644 --- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go @@ -15,7 +15,7 @@ type roffRenderer struct { extensions blackfriday.Extensions listCounters []int firstHeader bool - defineTerm bool + firstDD bool listDepth int } @@ -42,7 +42,8 @@ const ( quoteCloseTag = "\n.RE\n" listTag = "\n.RS\n" listCloseTag = "\n.RE\n" - arglistTag = "\n.TP\n" + dtTag = "\n.TP\n" + dd2Tag = "\n" tableStart = "\n.TS\nallbox;\n" tableEnd = ".TE\n" tableCellStart = "T{\n" @@ -90,7 +91,7 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering switch node.Type { case blackfriday.Text: - r.handleText(w, node, entering) + escapeSpecialChars(w, node.Literal) case blackfriday.Softbreak: out(w, crTag) case blackfriday.Hardbreak: @@ -150,40 +151,21 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering out(w, codeCloseTag) case blackfriday.Table: r.handleTable(w, node, entering) - case blackfriday.TableCell: - r.handleTableCell(w, node, entering) case blackfriday.TableHead: case blackfriday.TableBody: case blackfriday.TableRow: // no action as cell entries do all the nroff formatting return blackfriday.GoToNext + case blackfriday.TableCell: + r.handleTableCell(w, node, entering) + case blackfriday.HTMLSpan: + // ignore other HTML tags default: fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String()) } return walkAction } -func (r *roffRenderer) handleText(w io.Writer, node *blackfriday.Node, entering bool) { - var ( - start, end string - ) - // handle special roff table cell text encapsulation - if node.Parent.Type == blackfriday.TableCell { - if len(node.Literal) > 30 { - start = tableCellStart - end = tableCellEnd - } else { - // end rows that aren't terminated by "tableCellEnd" with a cr if end of row - if node.Parent.Next == nil && !node.Parent.IsHeader { - end = crTag - } - } - } - out(w, start) - escapeSpecialChars(w, node.Literal) - out(w, end) -} - func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) { if entering { switch node.Level { @@ -230,15 +212,20 @@ func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering if node.ListFlags&blackfriday.ListTypeOrdered != 0 { out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1])) r.listCounters[len(r.listCounters)-1]++ + } else if node.ListFlags&blackfriday.ListTypeTerm != 0 { + // DT (definition term): line just before DD (see below). + out(w, dtTag) + r.firstDD = true } else if node.ListFlags&blackfriday.ListTypeDefinition != 0 { - // state machine for handling terms and following definitions - // since blackfriday does not distinguish them properly, nor - // does it seperate them into separate lists as it should - if !r.defineTerm { - out(w, arglistTag) - r.defineTerm = true + // DD (definition description): line that starts with ": ". + // + // We have to distinguish between the first DD and the + // subsequent ones, as there should be no vertical + // whitespace between the DT and the first DD. 
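The renderer changes above replace the old `defineTerm` toggle with a `firstDD` flag: a definition term emits `.TP`, its first definition follows immediately, and each subsequent definition for the same term is separated by a blank line. A sketch of exercising this through what appears to be the package's exported `Render` helper, assuming a small hand-written definition list as input:

```go
package main

import (
	"fmt"

	"github.com/cpuguy83/go-md2man/v2/md2man"
)

func main() {
	// A definition term followed by two definitions (PHP-Markdown-Extra
	// style, which blackfriday's DefinitionLists extension parses).
	src := []byte("--verbose\n" +
		": Enable verbose output.\n" +
		": Repeat for more detail.\n")

	// With the change above, the term becomes a .TP entry, the first
	// definition follows it directly, and the second is separated by a
	// blank line instead of being treated as a new term.
	fmt.Println(string(md2man.Render(src)))
}
```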
+ if r.firstDD { + r.firstDD = false } else { - r.defineTerm = false + out(w, dd2Tag) } } else { out(w, ".IP \\(bu 2\n") @@ -251,7 +238,7 @@ func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) { if entering { out(w, tableStart) - //call walker to count cells (and rows?) so format section can be produced + // call walker to count cells (and rows?) so format section can be produced columns := countColumns(node) out(w, strings.Repeat("l ", columns)+"\n") out(w, strings.Repeat("l ", columns)+".\n") @@ -261,28 +248,41 @@ func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering } func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) { - var ( - start, end string - ) - if node.IsHeader { - start = codespanTag - end = codespanCloseTag - } if entering { + var start string if node.Prev != nil && node.Prev.Type == blackfriday.TableCell { - out(w, "\t"+start) - } else { - out(w, start) + start = "\t" + } + if node.IsHeader { + start += codespanTag + } else if nodeLiteralSize(node) > 30 { + start += tableCellStart } + out(w, start) } else { - // need to carriage return if we are at the end of the header row - if node.IsHeader && node.Next == nil { - end = end + crTag + var end string + if node.IsHeader { + end = codespanCloseTag + } else if nodeLiteralSize(node) > 30 { + end = tableCellEnd + } + if node.Next == nil && end != tableCellEnd { + // Last cell: need to carriage return if we are at the end of the + // header row and content isn't wrapped in a "tablecell" + end += crTag } out(w, end) } } +func nodeLiteralSize(node *blackfriday.Node) int { + total := 0 + for n := node.FirstChild; n != nil; n = n.FirstChild { + total += len(n.Literal) + } + return total +} + // because roff format requires knowing the column count before outputting any table // data we need to walk a table tree and count the columns func countColumns(node *blackfriday.Node) int { @@ -309,15 +309,6 @@ func out(w io.Writer, output string) { io.WriteString(w, output) // nolint: errcheck } -func needsBackslash(c byte) bool { - for _, r := range []byte("-_&\\~") { - if c == r { - return true - } - } - return false -} - func escapeSpecialChars(w io.Writer, text []byte) { for i := 0; i < len(text); i++ { // escape initial apostrophe or period @@ -328,7 +319,7 @@ func escapeSpecialChars(w io.Writer, text []byte) { // directly copy normal characters org := i - for i < len(text) && !needsBackslash(text[i]) { + for i < len(text) && text[i] != '\\' { i++ } if i > org { diff --git a/vendor/github.com/evanphx/json-patch/v5/patch.go b/vendor/github.com/evanphx/json-patch/v5/patch.go index f24e9d59c7..117f2c00df 100644 --- a/vendor/github.com/evanphx/json-patch/v5/patch.go +++ b/vendor/github.com/evanphx/json-patch/v5/patch.go @@ -766,9 +766,9 @@ func ensurePathExists(pd *container, path string, options *ApplyOptions) error { } } - // Check if the next part is a numeric index. + // Check if the next part is a numeric index or "-". // If yes, then create an array, otherwise, create an object. 
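The hunk above makes `ensurePathExists` treat the JSON Pointer append token `"-"` like a numeric index, so an `add` into a missing array path creates an array rather than an object. A sketch of exercising that path, under the assumption that the v5 `ApplyOptions.EnsurePathExistsOnAdd` flag is what routes `add` operations through this helper:

```go
package main

import (
	"fmt"
	"log"

	jsonpatch "github.com/evanphx/json-patch/v5"
)

func main() {
	patch, err := jsonpatch.DecodePatch([]byte(`[
		{"op": "add", "path": "/items/-", "value": "first"}
	]`))
	if err != nil {
		log.Fatal(err)
	}

	opts := jsonpatch.NewApplyOptions()
	opts.EnsurePathExistsOnAdd = true // assumed to exercise ensurePathExists above

	// "/items" does not exist yet; with the "-" handling above it should be
	// created as an array, giving roughly {"items":["first"]}.
	out, err := patch.ApplyWithOptions([]byte(`{}`), opts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}
```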
- if arrIndex, err = strconv.Atoi(parts[pi+1]); err == nil { + if arrIndex, err = strconv.Atoi(parts[pi+1]); err == nil || parts[pi+1] == "-" { if arrIndex < 0 { if !options.SupportNegativeIndices { @@ -845,6 +845,29 @@ func (p Patch) replace(doc *container, op Operation, options *ApplyOptions) erro return errors.Wrapf(err, "replace operation failed to decode path") } + if path == "" { + val := op.value() + + if val.which == eRaw { + if !val.tryDoc() { + if !val.tryAry() { + return errors.Wrapf(err, "replace operation value must be object or array") + } + } + } + + switch val.which { + case eAry: + *doc = &val.ary + case eDoc: + *doc = val.doc + case eRaw: + return errors.Wrapf(err, "replace operation hit impossible case") + } + + return nil + } + con, key := findObject(doc, path, options) if con == nil { @@ -911,6 +934,25 @@ func (p Patch) test(doc *container, op Operation, options *ApplyOptions) error { return errors.Wrapf(err, "test operation failed to decode path") } + if path == "" { + var self lazyNode + + switch sv := (*doc).(type) { + case *partialDoc: + self.doc = sv + self.which = eDoc + case *partialArray: + self.ary = *sv + self.which = eAry + } + + if self.equal(op.value()) { + return nil + } + + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } + con, key := findObject(doc, path, options) if con == nil { diff --git a/vendor/github.com/form3tech-oss/jwt-go/.gitignore b/vendor/github.com/form3tech-oss/jwt-go/.gitignore new file mode 100644 index 0000000000..c0e81a8d92 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/.gitignore @@ -0,0 +1,5 @@ +.DS_Store +bin +.idea/ + + diff --git a/vendor/github.com/form3tech-oss/jwt-go/.travis.yml b/vendor/github.com/form3tech-oss/jwt-go/.travis.yml new file mode 100644 index 0000000000..3c7fb7e1ae --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/.travis.yml @@ -0,0 +1,12 @@ +language: go + +script: + - go vet ./... + - go test -v ./... + +go: + - 1.12 + - 1.13 + - 1.14 + - 1.15 + - tip diff --git a/vendor/github.com/form3tech-oss/jwt-go/LICENSE b/vendor/github.com/form3tech-oss/jwt-go/LICENSE new file mode 100644 index 0000000000..df83a9c2f0 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/LICENSE @@ -0,0 +1,8 @@ +Copyright (c) 2012 Dave Grijalva + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ diff --git a/vendor/github.com/form3tech-oss/jwt-go/MIGRATION_GUIDE.md b/vendor/github.com/form3tech-oss/jwt-go/MIGRATION_GUIDE.md new file mode 100644 index 0000000000..7fc1f793cb --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/MIGRATION_GUIDE.md @@ -0,0 +1,97 @@ +## Migration Guide from v2 -> v3 + +Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code. + +### `Token.Claims` is now an interface type + +The most requested feature from the 2.0 verison of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`. + +`MapClaims` is an alias for `map[string]interface{}` with built in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type cast the claims property. + +The old example for parsing a token looked like this.. + +```go + if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { + fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) + } +``` + +is now directly mapped to... + +```go + if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { + claims := token.Claims.(jwt.MapClaims) + fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) + } +``` + +`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type. + +```go + type MyCustomClaims struct { + User string + *StandardClaims + } + + if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil { + claims := token.Claims.(*MyCustomClaims) + fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt) + } +``` + +### `ParseFromRequest` has been moved + +To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatues have also been augmented to receive a new argument: `Extractor`. + +`Extractors` do the work of picking the token string out of a request. The interface is simple and composable. + +This simple parsing example: + +```go + if token, err := jwt.ParseFromRequest(tokenString, req, keyLookupFunc); err == nil { + fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) + } +``` + +is directly mapped to: + +```go + if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil { + claims := token.Claims.(jwt.MapClaims) + fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) + } +``` + +There are several concrete `Extractor` types provided for your convenience: + +* `HeaderExtractor` will search a list of headers until one contains content. +* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content. +* `MultiExtractor` will try a list of `Extractors` in order until one returns content. +* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token. 
+* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): `Authorization` header and `access_token` argument +* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header + + +### RSA signing methods no longer accept `[]byte` keys + +Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse. + +To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types. + +```go + func keyLookupFunc(*Token) (interface{}, error) { + // Don't forget to validate the alg is what you expect: + if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok { + return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) + } + + // Look up key + key, err := lookupPublicKey(token.Header["kid"]) + if err != nil { + return nil, err + } + + // Unpack key from PEM encoded PKCS8 + return jwt.ParseRSAPublicKeyFromPEM(key) + } +``` diff --git a/vendor/github.com/form3tech-oss/jwt-go/README.md b/vendor/github.com/form3tech-oss/jwt-go/README.md new file mode 100644 index 0000000000..d7749077fd --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/README.md @@ -0,0 +1,104 @@ +# jwt-go + +[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go) +[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go) + +A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) + +**NEW VERSION COMING:** There have been a lot of improvements suggested since the version 3.0.0 released in 2016. I'm working now on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0 will follow shortly which will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. If you depend on this library and don't want to be interrupted, I recommend you use your dependency mangement tool to pin to version 3. + +**SECURITY NOTICE:** Some older versions of Go have a security issue in the cryotp/elliptic. Recommendation is to upgrade to at least 1.8.3. See issue #216 for more detail. + +**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided. + +## What the heck is a JWT? + +JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens. + +In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. 
A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way. + +The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used. + +The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) for information about reserved keys and the proper way to add your own. + +## What's in the box? + +This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own. + +## Examples + +See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage: + +* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac) +* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac) +* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples) + +## Extensions + +This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`. + +Here's an example of an extension that integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS): https://github.com/someone1/gcp-jwt-go + +## Compliance + +This library was last reviewed to comply with [RTF 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences: + +* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. + +## Project Status & Versioning + +This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). + +This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases). + +While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v3`. It will do the right thing WRT semantic versioning. + +**BREAKING CHANGES:*** +* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. + +## Usage Tips + +### Signing vs Encryption + +A token is simply a JSON object that is signed by its author. 
this tells you exactly two things about the data: + +* The author of the token was in the possession of the signing secret +* The data has not been modified since it was signed + +It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library. + +### Choosing a Signing Method + +There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric. + +Symmetric signing methods, such as HSA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this rarely is enough to matter. Symmetric signing methods work the best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation. + +Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification. + +### Signing Methods and Key Types + +Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones: + +* The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation +* The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation +* The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation + +### JWT and OAuth + +It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication. + +Without going too far down the rabbit hole, here's a description of the interaction of these technologies: + +* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth. +* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. +* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL. + +### Troubleshooting + +This library uses descriptive error messages whenever possible. If you are not getting the expected result, have a look at the errors. 
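The key-type guidance above (HMAC wants `[]byte`, RSA and ECDSA want the corresponding `crypto` key types) is easiest to see in a full round trip. A minimal sketch against this vendored fork's import path; the secret and claim values are hypothetical:

```go
package main

import (
	"fmt"
	"log"
	"time"

	jwt "github.com/form3tech-oss/jwt-go"
)

func main() {
	secret := []byte("example-secret") // HMAC methods take []byte keys

	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
		"user": "alice",
		"exp":  time.Now().Add(time.Hour).Unix(),
	})
	signed, err := token.SignedString(secret)
	if err != nil {
		log.Fatal(err)
	}

	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		// Always check the signing method before handing back a key.
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return secret, nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("valid:", parsed.Valid, "user:", parsed.Claims.(jwt.MapClaims)["user"])
}
```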
The most common place people get stuck is providing the correct type of key to the parser. See the above section on signing methods and key types. + +## More + +Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go). + +The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation. diff --git a/vendor/github.com/form3tech-oss/jwt-go/VERSION_HISTORY.md b/vendor/github.com/form3tech-oss/jwt-go/VERSION_HISTORY.md new file mode 100644 index 0000000000..6370298313 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/VERSION_HISTORY.md @@ -0,0 +1,118 @@ +## `jwt-go` Version History + +#### 3.2.0 + +* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation +* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate +* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before. +* Deprecated `ParseFromRequestWithClaims` to simplify API in the future. + +#### 3.1.0 + +* Improvements to `jwt` command line tool +* Added `SkipClaimsValidation` option to `Parser` +* Documentation updates + +#### 3.0.0 + +* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code + * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods. + * `ParseFromRequest` has been moved to `request` subpackage and usage has changed + * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims. +* Other Additions and Changes + * Added `Claims` interface type to allow users to decode the claims into a custom type + * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into. + * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage + * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims` + * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`. + * Added several new, more specific, validation errors to error type bitmask + * Moved examples from README to executable example files + * Signing method registry is now thread safe + * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser) + +#### 2.7.0 + +This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes. 
+ +* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying +* Error text for expired tokens includes how long it's been expired +* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM` +* Documentation updates + +#### 2.6.0 + +* Exposed inner error within ValidationError +* Fixed validation errors when using UseJSONNumber flag +* Added several unit tests + +#### 2.5.0 + +* Added support for signing method none. You shouldn't use this. The API tries to make this clear. +* Updated/fixed some documentation +* Added more helpful error message when trying to parse tokens that begin with `BEARER ` + +#### 2.4.0 + +* Added new type, Parser, to allow for configuration of various parsing parameters + * You can now specify a list of valid signing methods. Anything outside this set will be rejected. + * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON +* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) +* Fixed some bugs with ECDSA parsing + +#### 2.3.0 + +* Added support for ECDSA signing methods +* Added support for RSA PSS signing methods (requires go v1.4) + +#### 2.2.0 + +* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. + +#### 2.1.0 + +Backwards compatible API change that was missed in 2.0.0. + +* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` + +#### 2.0.0 + +There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. + +The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibilty has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. + +It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`. + +* **Compatibility Breaking Changes** + * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` + * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` + * `KeyFunc` now returns `interface{}` instead of `[]byte` + * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key + * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key +* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type. + * Added public package global `SigningMethodHS256` + * Added public package global `SigningMethodHS384` + * Added public package global `SigningMethodHS512` +* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. 
+ * Added public package global `SigningMethodRS256` + * Added public package global `SigningMethodRS384` + * Added public package global `SigningMethodRS512` +* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. +* Refactored the RSA implementation to be easier to read +* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` + +#### 1.0.2 + +* Fixed bug in parsing public keys from certificates +* Added more tests around the parsing of keys for RS256 +* Code refactoring in RS256 implementation. No functional changes + +#### 1.0.1 + +* Fixed panic if RS256 signing method was passed an invalid key + +#### 1.0.0 + +* First versioned release +* API stabilized +* Supports creating, signing, parsing, and validating JWT tokens +* Supports RS256 and HS256 signing methods \ No newline at end of file diff --git a/vendor/github.com/form3tech-oss/jwt-go/claims.go b/vendor/github.com/form3tech-oss/jwt-go/claims.go new file mode 100644 index 0000000000..624890666c --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/claims.go @@ -0,0 +1,136 @@ +package jwt + +import ( + "crypto/subtle" + "fmt" + "time" +) + +// For a type to be a Claims object, it must just have a Valid method that determines +// if the token is invalid for any supported reason +type Claims interface { + Valid() error +} + +// Structured version of Claims Section, as referenced at +// https://tools.ietf.org/html/rfc7519#section-4.1 +// See examples for how to use this with your own claim types +type StandardClaims struct { + Audience []string `json:"aud,omitempty"` + ExpiresAt int64 `json:"exp,omitempty"` + Id string `json:"jti,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` + Issuer string `json:"iss,omitempty"` + NotBefore int64 `json:"nbf,omitempty"` + Subject string `json:"sub,omitempty"` +} + +// Validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (c StandardClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them. + if c.VerifyExpiresAt(now, false) == false { + delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) + vErr.Inner = fmt.Errorf("token is expired by %v", delta) + vErr.Errors |= ValidationErrorExpired + } + + if c.VerifyIssuedAt(now, false) == false { + vErr.Inner = fmt.Errorf("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if c.VerifyNotBefore(now, false) == false { + vErr.Inner = fmt.Errorf("token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud(c.Audience, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { + return verifyExp(c.ExpiresAt, cmp, req) +} + +// Compares the iat claim against cmp. 
+// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { + return verifyIat(c.IssuedAt, cmp, req) +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + +// Compares the nbf claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { + return verifyNbf(c.NotBefore, cmp, req) +} + +// ----- helpers + +func verifyAud(aud []string, cmp string, required bool) bool { + if len(aud) == 0 { + return !required + } + + for _, a := range aud { + if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 { + return true + } + } + return false +} + +func verifyExp(exp int64, now int64, required bool) bool { + if exp == 0 { + return !required + } + return now <= exp +} + +func verifyIat(iat int64, now int64, required bool) bool { + if iat == 0 { + return !required + } + return now >= iat +} + +func verifyIss(iss string, cmp string, required bool) bool { + if iss == "" { + return !required + } + if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { + return true + } else { + return false + } +} + +func verifyNbf(nbf int64, now int64, required bool) bool { + if nbf == 0 { + return !required + } + return now >= nbf +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/doc.go b/vendor/github.com/form3tech-oss/jwt-go/doc.go new file mode 100644 index 0000000000..a86dc1a3b3 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/doc.go @@ -0,0 +1,4 @@ +// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html +// +// See README.md for more info. 
+package jwt diff --git a/vendor/github.com/form3tech-oss/jwt-go/ecdsa.go b/vendor/github.com/form3tech-oss/jwt-go/ecdsa.go new file mode 100644 index 0000000000..f977381240 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/ecdsa.go @@ -0,0 +1,148 @@ +package jwt + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "errors" + "math/big" +) + +var ( + // Sadly this is missing from crypto/ecdsa compared to crypto/rsa + ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") +) + +// Implements the ECDSA family of signing methods signing methods +// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification +type SigningMethodECDSA struct { + Name string + Hash crypto.Hash + KeySize int + CurveBits int +} + +// Specific instances for EC256 and company +var ( + SigningMethodES256 *SigningMethodECDSA + SigningMethodES384 *SigningMethodECDSA + SigningMethodES512 *SigningMethodECDSA +) + +func init() { + // ES256 + SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} + RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { + return SigningMethodES256 + }) + + // ES384 + SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} + RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { + return SigningMethodES384 + }) + + // ES512 + SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} + RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { + return SigningMethodES512 + }) +} + +func (m *SigningMethodECDSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an ecdsa.PublicKey struct +func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + // Get the key + var ecdsaKey *ecdsa.PublicKey + switch k := key.(type) { + case *ecdsa.PublicKey: + ecdsaKey = k + default: + return ErrInvalidKeyType + } + + if len(sig) != 2*m.KeySize { + return ErrECDSAVerification + } + + r := big.NewInt(0).SetBytes(sig[:m.KeySize]) + s := big.NewInt(0).SetBytes(sig[m.KeySize:]) + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true { + return nil + } else { + return ErrECDSAVerification + } +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an ecdsa.PrivateKey struct +func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { + // Get the key + var ecdsaKey *ecdsa.PrivateKey + switch k := key.(type) { + case *ecdsa.PrivateKey: + ecdsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return r, s + if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { + curveBits := ecdsaKey.Curve.Params().BitSize + + if m.CurveBits != curveBits { + return "", ErrInvalidKey + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes += 1 + } + + // We serialize the outpus (r and s) into big-endian byte arrays and pad + // them with zeros on the left to make 
sure the sizes work out. Both arrays + // must be keyBytes long, and the output must be 2*keyBytes long. + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...) + + return EncodeSegment(out), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/ecdsa_utils.go b/vendor/github.com/form3tech-oss/jwt-go/ecdsa_utils.go new file mode 100644 index 0000000000..db9f4be7d8 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/ecdsa_utils.go @@ -0,0 +1,69 @@ +package jwt + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key") + ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key") +) + +// Parse PEM encoded Elliptic Curve Private Key Structure +func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *ecdsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, ErrNotECPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *ecdsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, ErrNotECPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/errors.go b/vendor/github.com/form3tech-oss/jwt-go/errors.go new file mode 100644 index 0000000000..1c93024aad --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/errors.go @@ -0,0 +1,59 @@ +package jwt + +import ( + "errors" +) + +// Error constants +var ( + ErrInvalidKey = errors.New("key is invalid") + ErrInvalidKeyType = errors.New("key is of invalid type") + ErrHashUnavailable = errors.New("the requested hash function is unavailable") +) + +// The errors that might occur when parsing and validating a token +const ( + ValidationErrorMalformed uint32 = 1 << iota // Token is malformed + ValidationErrorUnverifiable // Token could not be verified because of signing problems + ValidationErrorSignatureInvalid // Signature validation failed + + // Standard Claim validation errors + ValidationErrorAudience // AUD validation failed + ValidationErrorExpired // EXP validation failed + ValidationErrorIssuedAt // IAT validation failed + ValidationErrorIssuer // ISS validation failed + ValidationErrorNotValidYet // NBF validation failed + ValidationErrorId // JTI validation failed + ValidationErrorClaimsInvalid // Generic claims validation error +) + +// Helper for 
constructing a ValidationError with a string error message +func NewValidationError(errorText string, errorFlags uint32) *ValidationError { + return &ValidationError{ + text: errorText, + Errors: errorFlags, + } +} + +// The error from Parse if token is not valid +type ValidationError struct { + Inner error // stores the error returned by external dependencies, i.e.: KeyFunc + Errors uint32 // bitfield. see ValidationError... constants + text string // errors that do not have a valid error just have text +} + +// Validation error is an error type +func (e ValidationError) Error() string { + if e.Inner != nil { + return e.Inner.Error() + } else if e.text != "" { + return e.text + } else { + return "token is invalid" + } +} + +// No errors +func (e *ValidationError) valid() bool { + return e.Errors == 0 +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/hmac.go b/vendor/github.com/form3tech-oss/jwt-go/hmac.go new file mode 100644 index 0000000000..addbe5d401 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/hmac.go @@ -0,0 +1,95 @@ +package jwt + +import ( + "crypto" + "crypto/hmac" + "errors" +) + +// Implements the HMAC-SHA family of signing methods signing methods +// Expects key type of []byte for both signing and validation +type SigningMethodHMAC struct { + Name string + Hash crypto.Hash +} + +// Specific instances for HS256 and company +var ( + SigningMethodHS256 *SigningMethodHMAC + SigningMethodHS384 *SigningMethodHMAC + SigningMethodHS512 *SigningMethodHMAC + ErrSignatureInvalid = errors.New("signature is invalid") +) + +func init() { + // HS256 + SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { + return SigningMethodHS256 + }) + + // HS384 + SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { + return SigningMethodHS384 + }) + + // HS512 + SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { + return SigningMethodHS512 + }) +} + +func (m *SigningMethodHMAC) Alg() string { + return m.Name +} + +// Verify the signature of HSXXX tokens. Returns nil if the signature is valid. +func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { + // Verify the key is the right type + keyBytes, ok := key.([]byte) + if !ok { + return ErrInvalidKeyType + } + + // Decode signature, for comparison + sig, err := DecodeSegment(signature) + if err != nil { + return err + } + + // Can we use the specified hashing method? + if !m.Hash.Available() { + return ErrHashUnavailable + } + + // This signing method is symmetric, so we validate the signature + // by reproducing the signature from the signing string and key, then + // comparing that against the provided signature. + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + if !hmac.Equal(sig, hasher.Sum(nil)) { + return ErrSignatureInvalid + } + + // No validation errors. Signature is good. + return nil +} + +// Implements the Sign method from SigningMethod for this signing method. 
+// Key must be []byte +func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { + if keyBytes, ok := key.([]byte); ok { + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + + return EncodeSegment(hasher.Sum(nil)), nil + } + + return "", ErrInvalidKeyType +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/map_claims.go b/vendor/github.com/form3tech-oss/jwt-go/map_claims.go new file mode 100644 index 0000000000..90ab6bea35 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/map_claims.go @@ -0,0 +1,102 @@ +package jwt + +import ( + "encoding/json" + "errors" + // "fmt" +) + +// Claims type that uses the map[string]interface{} for JSON decoding +// This is the default claims type if you don't supply one +type MapClaims map[string]interface{} + +// Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyAudience(cmp string, req bool) bool { + aud, ok := m["aud"].([]string) + if !ok { + strAud, ok := m["aud"].(string) + if !ok { + return false + } + aud = append(aud, strAud) + } + + return verifyAud(aud, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { + switch exp := m["exp"].(type) { + case float64: + return verifyExp(int64(exp), cmp, req) + case json.Number: + v, _ := exp.Int64() + return verifyExp(v, cmp, req) + } + return req == false +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { + switch iat := m["iat"].(type) { + case float64: + return verifyIat(int64(iat), cmp, req) + case json.Number: + v, _ := iat.Int64() + return verifyIat(v, cmp, req) + } + return req == false +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { + iss, _ := m["iss"].(string) + return verifyIss(iss, cmp, req) +} + +// Compares the nbf claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { + switch nbf := m["nbf"].(type) { + case float64: + return verifyNbf(int64(nbf), cmp, req) + case json.Number: + v, _ := nbf.Int64() + return verifyNbf(v, cmp, req) + } + return req == false +} + +// Validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. 
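`MapClaims.VerifyAudience` above accepts the `aud` claim either as a single string or as a `[]string`, normalizing both into the slice handed to `verifyAud`. A small sketch with hypothetical audience values:

```go
package main

import (
	"fmt"

	jwt "github.com/form3tech-oss/jwt-go"
)

func main() {
	single := jwt.MapClaims{"aud": "payments-api"}
	multi := jwt.MapClaims{"aud": []string{"payments-api", "reports-api"}}

	// Both forms satisfy the same check; the string form is wrapped into a
	// one-element slice before the constant-time comparison.
	fmt.Println(single.VerifyAudience("payments-api", true)) // true
	fmt.Println(multi.VerifyAudience("reports-api", true))   // true
	fmt.Println(multi.VerifyAudience("billing-api", true))   // false
}
```

As the code above reads, an `aud` array decoded from raw JSON arrives as `[]interface{}` and matches neither type assertion, so the list form is mainly useful for claims constructed in code.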
+func (m MapClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + if m.VerifyExpiresAt(now, false) == false { + vErr.Inner = errors.New("Token is expired") + vErr.Errors |= ValidationErrorExpired + } + + if m.VerifyIssuedAt(now, false) == false { + vErr.Inner = errors.New("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if m.VerifyNotBefore(now, false) == false { + vErr.Inner = errors.New("Token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/none.go b/vendor/github.com/form3tech-oss/jwt-go/none.go new file mode 100644 index 0000000000..f04d189d06 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/none.go @@ -0,0 +1,52 @@ +package jwt + +// Implements the none signing method. This is required by the spec +// but you probably should never use it. +var SigningMethodNone *signingMethodNone + +const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" + +var NoneSignatureTypeDisallowedError error + +type signingMethodNone struct{} +type unsafeNoneMagicConstant string + +func init() { + SigningMethodNone = &signingMethodNone{} + NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) + + RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { + return SigningMethodNone + }) +} + +func (m *signingMethodNone) Alg() string { + return "none" +} + +// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { + // Key must be UnsafeAllowNoneSignatureType to prevent accidentally + // accepting 'none' signing method + if _, ok := key.(unsafeNoneMagicConstant); !ok { + return NoneSignatureTypeDisallowedError + } + // If signing method is none, signature must be an empty string + if signature != "" { + return NewValidationError( + "'none' signing method with non-empty signature", + ValidationErrorSignatureInvalid, + ) + } + + // Accept 'none' signing method. + return nil +} + +// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { + if _, ok := key.(unsafeNoneMagicConstant); ok { + return "", nil + } + return "", NoneSignatureTypeDisallowedError +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/parser.go b/vendor/github.com/form3tech-oss/jwt-go/parser.go new file mode 100644 index 0000000000..d6901d9adb --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/parser.go @@ -0,0 +1,148 @@ +package jwt + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +type Parser struct { + ValidMethods []string // If populated, only these methods will be considered valid + UseJSONNumber bool // Use JSON Number format in JSON decoder + SkipClaimsValidation bool // Skip claims validation during token parsing +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. 
+// If everything is kosher, err will be nil +func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) +} + +func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + token, parts, err := p.ParseUnverified(tokenString, claims) + if err != nil { + return token, err + } + + // Verify signing method is in the required set + if p.ValidMethods != nil { + var signingMethodValid = false + var alg = token.Method.Alg() + for _, m := range p.ValidMethods { + if m == alg { + signingMethodValid = true + break + } + } + if !signingMethodValid { + // signing method is not in the listed set + return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) + } + } + + // Lookup key + var key interface{} + if keyFunc == nil { + // keyFunc was not provided. short circuiting validation + return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable) + } + if key, err = keyFunc(token); err != nil { + // keyFunc returned an error + if ve, ok := err.(*ValidationError); ok { + return token, ve + } + return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} + } + + vErr := &ValidationError{} + + // Validate Claims + if !p.SkipClaimsValidation { + if err := token.Claims.Valid(); err != nil { + + // If the Claims Valid returned an error, check if it is a validation error, + // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set + if e, ok := err.(*ValidationError); !ok { + vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} + } else { + vErr = e + } + } + } + + // Perform validation + token.Signature = parts[2] + if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + vErr.Inner = err + vErr.Errors |= ValidationErrorSignatureInvalid + } + + if vErr.valid() { + token.Valid = true + return token, nil + } + + return token, vErr +} + +// WARNING: Don't use this method unless you know what you're doing +// +// This method parses the token but doesn't validate the signature. It's only +// ever useful in cases where you know the signature is valid (because it has +// been checked previously in the stack) and you want to extract values from +// it. 
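In contrast to the unverified variant described above, the usual verified path is ParseWithClaims with a Keyfunc. A sketch only, not part of this patch; the token string and the shared secret are placeholders:

```go
package main

import (
	"fmt"

	jwt "github.com/form3tech-oss/jwt-go"
)

func main() {
	// tokenString is assumed to be an HS256-signed compact JWT obtained elsewhere.
	tokenString := "<header>.<claims>.<signature>"

	// Restrict accepted algorithms so an attacker cannot pick the "alg" header.
	parser := &jwt.Parser{ValidMethods: []string{jwt.SigningMethodHS256.Alg()}}

	token, err := parser.ParseWithClaims(tokenString, jwt.MapClaims{}, func(t *jwt.Token) (interface{}, error) {
		// The Keyfunc may inspect t.Header (e.g. "kid") to select a key;
		// here a single hypothetical shared secret is returned.
		return []byte("my-secret"), nil
	})
	if err != nil {
		fmt.Println("invalid token:", err)
		return
	}

	claims := token.Claims.(jwt.MapClaims)
	fmt.Println(token.Valid, claims["sub"])
}
```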
+func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { + parts = strings.Split(tokenString, ".") + if len(parts) != 3 { + return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) + } + + token = &Token{Raw: tokenString} + + // parse Header + var headerBytes []byte + if headerBytes, err = DecodeSegment(parts[0]); err != nil { + if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { + return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) + } + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + if err = json.Unmarshal(headerBytes, &token.Header); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // parse Claims + var claimBytes []byte + token.Claims = claims + + if claimBytes, err = DecodeSegment(parts[1]); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + if p.UseJSONNumber { + dec.UseNumber() + } + // JSON Decode. Special case for map type to avoid weird pointer behavior + if c, ok := token.Claims.(MapClaims); ok { + err = dec.Decode(&c) + } else { + err = dec.Decode(&claims) + } + // Handle decode error + if err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // Lookup signature method + if method, ok := token.Header["alg"].(string); ok { + if token.Method = GetSigningMethod(method); token.Method == nil { + return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) + } + } else { + return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) + } + + return token, parts, nil +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/rsa.go b/vendor/github.com/form3tech-oss/jwt-go/rsa.go new file mode 100644 index 0000000000..e4caf1ca4a --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/rsa.go @@ -0,0 +1,101 @@ +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSA family of signing methods signing methods +// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation +type SigningMethodRSA struct { + Name string + Hash crypto.Hash +} + +// Specific instances for RS256 and company +var ( + SigningMethodRS256 *SigningMethodRSA + SigningMethodRS384 *SigningMethodRSA + SigningMethodRS512 *SigningMethodRSA +) + +func init() { + // RS256 + SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { + return SigningMethodRS256 + }) + + // RS384 + SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { + return SigningMethodRS384 + }) + + // RS512 + SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { + return SigningMethodRS512 + }) +} + +func (m *SigningMethodRSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this signing method, must be an *rsa.PublicKey structure. 
+func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + var ok bool + + if rsaKey, ok = key.(*rsa.PublicKey); !ok { + return ErrInvalidKeyType + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) +} + +// Implements the Sign method from SigningMethod +// For this signing method, must be an *rsa.PrivateKey structure. +func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + var ok bool + + // Validate type of key + if rsaKey, ok = key.(*rsa.PrivateKey); !ok { + return "", ErrInvalidKey + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/rsa_pss.go b/vendor/github.com/form3tech-oss/jwt-go/rsa_pss.go new file mode 100644 index 0000000000..c014708648 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/rsa_pss.go @@ -0,0 +1,142 @@ +// +build go1.4 + +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSAPSS family of signing methods signing methods +type SigningMethodRSAPSS struct { + *SigningMethodRSA + Options *rsa.PSSOptions + // VerifyOptions is optional. If set overrides Options for rsa.VerifyPPS. + // Used to accept tokens signed with rsa.PSSSaltLengthAuto, what doesn't follow + // https://tools.ietf.org/html/rfc7518#section-3.5 but was used previously. + // See https://github.com/dgrijalva/jwt-go/issues/285#issuecomment-437451244 for details. + VerifyOptions *rsa.PSSOptions +} + +// Specific instances for RS/PS and company. 
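Before the PSS variants below, the plain RSA methods defined just above can be exercised end to end with a throwaway key. A hedged sketch for illustration only; the signing string is a placeholder:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jwt "github.com/form3tech-oss/jwt-go"
)

func main() {
	// Throwaway key, generated only for this example.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	signingString := "header.payload" // placeholder signing input

	// Sign expects *rsa.PrivateKey; Verify expects *rsa.PublicKey.
	sig, err := jwt.SigningMethodRS256.Sign(signingString, key)
	if err != nil {
		panic(err)
	}
	fmt.Println(jwt.SigningMethodRS256.Verify(signingString, sig, &key.PublicKey)) // <nil>
}
```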
+var ( + SigningMethodPS256 *SigningMethodRSAPSS + SigningMethodPS384 *SigningMethodRSAPSS + SigningMethodPS512 *SigningMethodRSAPSS +) + +func init() { + // PS256 + SigningMethodPS256 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS256", + Hash: crypto.SHA256, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { + return SigningMethodPS256 + }) + + // PS384 + SigningMethodPS384 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS384", + Hash: crypto.SHA384, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { + return SigningMethodPS384 + }) + + // PS512 + SigningMethodPS512 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS512", + Hash: crypto.SHA512, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { + return SigningMethodPS512 + }) +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an rsa.PublicKey struct +func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + switch k := key.(type) { + case *rsa.PublicKey: + rsaKey = k + default: + return ErrInvalidKey + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + opts := m.Options + if m.VerifyOptions != nil { + opts = m.VerifyOptions + } + + return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts) +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an rsa.PrivateKey struct +func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + + switch k := key.(type) { + case *rsa.PrivateKey: + rsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/rsa_utils.go b/vendor/github.com/form3tech-oss/jwt-go/rsa_utils.go new file mode 100644 index 0000000000..14c78c292a --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/rsa_utils.go @@ -0,0 +1,101 @@ +package jwt + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be a PEM encoded PKCS1 or PKCS8 key") + ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") + ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key") +) + +// Parse PEM encoded PKCS1 or PKCS8 private key +func 
ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 private key protected with password +func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + + var blockDecrypted []byte + if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil { + return nil, err + } + + if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *rsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, ErrNotRSAPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/signing_method.go b/vendor/github.com/form3tech-oss/jwt-go/signing_method.go new file mode 100644 index 0000000000..ed1f212b21 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/signing_method.go @@ -0,0 +1,35 @@ +package jwt + +import ( + "sync" +) + +var signingMethods = map[string]func() SigningMethod{} +var signingMethodLock = new(sync.RWMutex) + +// Implement SigningMethod to add new methods for signing or verifying tokens. +type SigningMethod interface { + Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid + Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error + Alg() string // returns the alg identifier for this method (example: 'HS256') +} + +// Register the "alg" name and a factory function for signing method. 
+// This is typically done during init() in the method's implementation +func RegisterSigningMethod(alg string, f func() SigningMethod) { + signingMethodLock.Lock() + defer signingMethodLock.Unlock() + + signingMethods[alg] = f +} + +// Get a signing method from an "alg" string +func GetSigningMethod(alg string) (method SigningMethod) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + if methodF, ok := signingMethods[alg]; ok { + method = methodF() + } + return +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/token.go b/vendor/github.com/form3tech-oss/jwt-go/token.go new file mode 100644 index 0000000000..d637e0867c --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/token.go @@ -0,0 +1,108 @@ +package jwt + +import ( + "encoding/base64" + "encoding/json" + "strings" + "time" +) + +// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). +// You can override it to use another time value. This is useful for testing or if your +// server uses a different time zone than your tokens. +var TimeFunc = time.Now + +// Parse methods use this callback function to supply +// the key for verification. The function receives the parsed, +// but unverified Token. This allows you to use properties in the +// Header of the token (such as `kid`) to identify which key to use. +type Keyfunc func(*Token) (interface{}, error) + +// A JWT Token. Different fields will be used depending on whether you're +// creating or parsing/verifying a token. +type Token struct { + Raw string // The raw token. Populated when you Parse a token + Method SigningMethod // The signing method used or to be used + Header map[string]interface{} // The first segment of the token + Claims Claims // The second segment of the token + Signature string // The third segment of the token. Populated when you Parse a token + Valid bool // Is the token valid? Populated when you Parse/Verify a token +} + +// Create a new Token. Takes a signing method +func New(method SigningMethod) *Token { + return NewWithClaims(method, MapClaims{}) +} + +func NewWithClaims(method SigningMethod, claims Claims) *Token { + return &Token{ + Header: map[string]interface{}{ + "typ": "JWT", + "alg": method.Alg(), + }, + Claims: claims, + Method: method, + } +} + +// Get the complete, signed token +func (t *Token) SignedString(key interface{}) (string, error) { + var sig, sstr string + var err error + if sstr, err = t.SigningString(); err != nil { + return "", err + } + if sig, err = t.Method.Sign(sstr, key); err != nil { + return "", err + } + return strings.Join([]string{sstr, sig}, "."), nil +} + +// Generate the signing string. This is the +// most expensive part of the whole deal. Unless you +// need this for something special, just go straight for +// the SignedString. +func (t *Token) SigningString() (string, error) { + var err error + parts := make([]string, 2) + for i, _ := range parts { + var jsonValue []byte + if i == 0 { + if jsonValue, err = json.Marshal(t.Header); err != nil { + return "", err + } + } else { + if jsonValue, err = json.Marshal(t.Claims); err != nil { + return "", err + } + } + + parts[i] = EncodeSegment(jsonValue) + } + return strings.Join(parts, "."), nil +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. 
+// If everything is kosher, err will be nil +func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return new(Parser).Parse(tokenString, keyFunc) +} + +func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) +} + +// Encode JWT specific base64url encoding with padding stripped +func EncodeSegment(seg []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") +} + +// Decode JWT specific base64url encoding with padding stripped +func DecodeSegment(seg string) ([]byte, error) { + if l := len(seg) % 4; l > 0 { + seg += strings.Repeat("=", 4-l) + } + + return base64.URLEncoding.DecodeString(seg) +} diff --git a/vendor/github.com/fsnotify/fsnotify/.mailmap b/vendor/github.com/fsnotify/fsnotify/.mailmap new file mode 100644 index 0000000000..a04f2907fe --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.mailmap @@ -0,0 +1,2 @@ +Chris Howey +Nathan Youngman <4566+nathany@users.noreply.github.com> diff --git a/vendor/github.com/fsnotify/fsnotify/.travis.yml b/vendor/github.com/fsnotify/fsnotify/.travis.yml deleted file mode 100644 index a9c30165cd..0000000000 --- a/vendor/github.com/fsnotify/fsnotify/.travis.yml +++ /dev/null @@ -1,36 +0,0 @@ -sudo: false -language: go - -go: - - "stable" - - "1.11.x" - - "1.10.x" - - "1.9.x" - -matrix: - include: - - go: "stable" - env: GOLINT=true - allow_failures: - - go: tip - fast_finish: true - - -before_install: - - if [ ! -z "${GOLINT}" ]; then go get -u golang.org/x/lint/golint; fi - -script: - - go test --race ./... - -after_script: - - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" - - if [ ! -z "${GOLINT}" ]; then echo running golint; golint --set_exit_status ./...; else echo skipping golint; fi - - go vet ./... - -os: - - linux - - osx - - windows - -notifications: - email: false diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS index 5ab5d41c54..6cbabe5ef5 100644 --- a/vendor/github.com/fsnotify/fsnotify/AUTHORS +++ b/vendor/github.com/fsnotify/fsnotify/AUTHORS @@ -4,35 +4,44 @@ # You can update this list using the following command: # -# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' +# $ (head -n10 AUTHORS && git shortlog -se | sed -E 's/^\s+[0-9]+\t//') | tee AUTHORS # Please keep the list sorted. Aaron L Adrien Bustany +Alexey Kazakov Amit Krishnan Anmol Sethi Bjørn Erik Pedersen +Brian Goff Bruno Bigras Caleb Spare Case Nelson -Chris Howey +Chris Howey Christoffer Buchholz Daniel Wagner-Hall Dave Cheney +Eric Lin Evan Phoenix Francisco Souza +Gautam Dey Hari haran -John C Barstow +Ichinose Shogo +Johannes Ebke +John C Barstow Kelvin Fo Ken-ichirou MATSUZAWA Matt Layher +Matthias Stone Nathan Youngman Nickolai Zeldovich +Oliver Bristow Patrick Paul Hammond Pawel Knap Pieter Droogendijk +Pratik Shinde Pursuit92 Riku Voipio Rob Figueiredo @@ -41,6 +50,7 @@ Slawek Ligus Soge Zhang Tiffany Jernigan Tilak Sharma +Tobias Klauser Tom Payne Travis Cline Tudor Golubenco diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index be4d7ea2c1..a438fe4b4a 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,6 +1,28 @@ # Changelog -## v1.4.7 / 2018-01-09 +All notable changes to this project will be documented in this file. 
+ +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +## [1.5.1] - 2021-08-24 + +* Revert Add AddRaw to not follow symlinks + +## [1.5.0] - 2021-08-20 + +* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381) +* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298) +* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289) +* CI: Use GitHub Actions for CI and cover go 1.12-1.17 + [#378](https://github.com/fsnotify/fsnotify/pull/378) + [#381](https://github.com/fsnotify/fsnotify/pull/381) + [#385](https://github.com/fsnotify/fsnotify/pull/385) +* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325) + +## [1.4.7] - 2018-01-09 * BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) * Tests: Fix missing verb on format string (thanks @rchiossi) @@ -10,62 +32,62 @@ * Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) * Docs: replace references to OS X with macOS -## v1.4.2 / 2016-10-10 +## [1.4.2] - 2016-10-10 * Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) -## v1.4.1 / 2016-10-04 +## [1.4.1] - 2016-10-04 * Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) -## v1.4.0 / 2016-10-01 +## [1.4.0] - 2016-10-01 * add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) -## v1.3.1 / 2016-06-28 +## [1.3.1] - 2016-06-28 * Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) -## v1.3.0 / 2016-04-19 +## [1.3.0] - 2016-04-19 * Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) -## v1.2.10 / 2016-03-02 +## [1.2.10] - 2016-03-02 * Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) -## v1.2.9 / 2016-01-13 +## [1.2.9] - 2016-01-13 kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) -## v1.2.8 / 2015-12-17 +## [1.2.8] - 2015-12-17 * kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) * inotify: fix race in test * enable race detection for continuous integration (Linux, Mac, Windows) -## v1.2.5 / 2015-10-17 +## [1.2.5] - 2015-10-17 * inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) * inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) * kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) * kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) -## v1.2.1 / 2015-10-14 +## [1.2.1] - 2015-10-14 * 
kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) -## v1.2.0 / 2015-02-08 +## [1.2.0] - 2015-02-08 * inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) * inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) * kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) -## v1.1.1 / 2015-02-05 +## [1.1.1] - 2015-02-05 * inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) -## v1.1.0 / 2014-12-12 +## [1.1.0] - 2014-12-12 * kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) * add low-level functions @@ -77,22 +99,22 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn * kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) * kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) -## v1.0.4 / 2014-09-07 +## [1.0.4] - 2014-09-07 * kqueue: add dragonfly to the build tags. * Rename source code files, rearrange code so exported APIs are at the top. * Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) -## v1.0.3 / 2014-08-19 +## [1.0.3] - 2014-08-19 * [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) -## v1.0.2 / 2014-08-17 +## [1.0.2] - 2014-08-17 * [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) * [Fix] Make ./path and path equivalent. (thanks @zhsso) -## v1.0.0 / 2014-08-15 +## [1.0.0] - 2014-08-15 * [API] Remove AddWatch on Windows, use Add. * Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) @@ -146,51 +168,51 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn * no tests for the current implementation * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) -## v0.9.3 / 2014-12-31 +## [0.9.3] - 2014-12-31 * kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) -## v0.9.2 / 2014-08-17 +## [0.9.2] - 2014-08-17 * [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) -## v0.9.1 / 2014-06-12 +## [0.9.1] - 2014-06-12 * Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) -## v0.9.0 / 2014-01-17 +## [0.9.0] - 2014-01-17 * IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) * [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) * [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. 
-## v0.8.12 / 2013-11-13 +## [0.8.12] - 2013-11-13 * [API] Remove FD_SET and friends from Linux adapter -## v0.8.11 / 2013-11-02 +## [0.8.11] - 2013-11-02 * [Doc] Add Changelog [#72][] (thanks @nathany) * [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) -## v0.8.10 / 2013-10-19 +## [0.8.10] - 2013-10-19 * [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) * [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) * [Doc] specify OS-specific limits in README (thanks @debrando) -## v0.8.9 / 2013-09-08 +## [0.8.9] - 2013-09-08 * [Doc] Contributing (thanks @nathany) * [Doc] update package path in example code [#63][] (thanks @paulhammond) * [Doc] GoCI badge in README (Linux only) [#60][] * [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) -## v0.8.8 / 2013-06-17 +## [0.8.8] - 2013-06-17 * [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) -## v0.8.7 / 2013-06-03 +## [0.8.7] - 2013-06-03 * [API] Make syscall flags internal * [Fix] inotify: ignore event changes @@ -198,74 +220,74 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn * [Fix] tests on Windows * lower case error messages -## v0.8.6 / 2013-05-23 +## [0.8.6] - 2013-05-23 * kqueue: Use EVT_ONLY flag on Darwin * [Doc] Update README with full example -## v0.8.5 / 2013-05-09 +## [0.8.5] - 2013-05-09 * [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) -## v0.8.4 / 2013-04-07 +## [0.8.4] - 2013-04-07 * [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) -## v0.8.3 / 2013-03-13 +## [0.8.3] - 2013-03-13 * [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) * [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) -## v0.8.2 / 2013-02-07 +## [0.8.2] - 2013-02-07 * [Doc] add Authors * [Fix] fix data races for map access [#29][] (thanks @fsouza) -## v0.8.1 / 2013-01-09 +## [0.8.1] - 2013-01-09 * [Fix] Windows path separators * [Doc] BSD License -## v0.8.0 / 2012-11-09 +## [0.8.0] - 2012-11-09 * kqueue: directory watching improvements (thanks @vmirage) * inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) * [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) -## v0.7.4 / 2012-10-09 +## [0.7.4] - 2012-10-09 * [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) * [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) * [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) * [Fix] kqueue: modify after recreation of file -## v0.7.3 / 2012-09-27 +## [0.7.3] - 2012-09-27 * [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) * [Fix] kqueue: no longer get duplicate CREATE events -## v0.7.2 / 2012-09-01 +## [0.7.2] - 2012-09-01 * kqueue: events for created directories -## v0.7.1 / 2012-07-14 +## [0.7.1] - 2012-07-14 * [Fix] for renaming files -## v0.7.0 / 2012-07-02 +## [0.7.0] - 2012-07-02 * [Feature] FSNotify flags * [Fix] inotify: Added file name back to event path -## v0.6.0 / 2012-06-06 +## [0.6.0] - 2012-06-06 * kqueue: watch files after directory created (thanks @tmc) -## v0.5.1 / 2012-05-22 +## [0.5.1] - 2012-05-22 * [Fix] inotify: remove all watches before Close() -## v0.5.0 / 2012-05-03 +## [0.5.0] - 2012-05-03 * [API] kqueue: return errors during watch instead of sending over channel * kqueue: match symlink behavior on Linux @@ -273,22 
+295,22 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn * [Fix] kqueue: handle EINTR (reported by @robfig) * [Doc] Godoc example [#1][] (thanks @davecheney) -## v0.4.0 / 2012-03-30 +## [0.4.0] - 2012-03-30 * Go 1 released: build with go tool * [Feature] Windows support using winfsnotify * Windows does not have attribute change notifications * Roll attribute notifications into IsModify -## v0.3.0 / 2012-02-19 +## [0.3.0] - 2012-02-19 * kqueue: add files when watch directory -## v0.2.0 / 2011-12-30 +## [0.2.0] - 2011-12-30 * update to latest Go weekly code -## v0.1.0 / 2011-10-19 +## [0.1.0] - 2011-10-19 * kqueue: add watch on file creation to match inotify * kqueue: create file event diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md index b2629e5229..df57b1b282 100644 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -12,9 +12,9 @@ Cross platform: Windows, Linux, BSD and macOS. | Adapter | OS | Status | | --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | -| inotify | Linux 2.6.27 or later, Android\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | -| kqueue | BSD, macOS, iOS\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | -| ReadDirectoryChangesW | Windows | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| inotify | Linux 2.6.27 or later, Android\* | Supported | +| kqueue | BSD, macOS, iOS\* | Supported | +| ReadDirectoryChangesW | Windows | Supported | | FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | | FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) | | fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) | diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go index ced39cb881..b3ac3d8f55 100644 --- a/vendor/github.com/fsnotify/fsnotify/fen.go +++ b/vendor/github.com/fsnotify/fsnotify/fen.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build solaris // +build solaris package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 89cab046d1..0f4ee52e8a 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !plan9 // +build !plan9 // Package fsnotify provides a platform-independent interface for file system notifications. 
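For context on the fsnotify API whose build tags and changelog are updated above, a minimal watcher loop looks roughly like the sketch below (the watched path /tmp is only an example):

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// /tmp is an example path; any existing directory or file works.
	if err := watcher.Add("/tmp"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case event, ok := <-watcher.Events:
			if !ok {
				return
			}
			log.Println("event:", event.Op, event.Name)
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}
```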
diff --git a/vendor/github.com/fsnotify/fsnotify/go.mod b/vendor/github.com/fsnotify/fsnotify/go.mod index ff11e13f22..54089e48b7 100644 --- a/vendor/github.com/fsnotify/fsnotify/go.mod +++ b/vendor/github.com/fsnotify/fsnotify/go.mod @@ -2,4 +2,6 @@ module github.com/fsnotify/fsnotify go 1.13 -require golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 +require golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c + +retract v1.5.0 diff --git a/vendor/github.com/fsnotify/fsnotify/go.sum b/vendor/github.com/fsnotify/fsnotify/go.sum index f60af9855d..0f478630ca 100644 --- a/vendor/github.com/fsnotify/fsnotify/go.sum +++ b/vendor/github.com/fsnotify/fsnotify/go.sum @@ -1,2 +1,2 @@ -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 h1:L2auWcuQIvxz9xSEqzESnV/QN/gNRXNApHi3fYwl2w0= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go index d9fd1b88a0..eb87699b5b 100644 --- a/vendor/github.com/fsnotify/fsnotify/inotify.go +++ b/vendor/github.com/fsnotify/fsnotify/inotify.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux // +build linux package fsnotify @@ -272,7 +273,7 @@ func (w *Watcher) readEvents() { if nameLen > 0 { // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent])) + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] // The filename is padded with NULL bytes. TrimRight() gets rid of those. name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") } diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go index b33f2b4d4b..e9ff9439f7 100644 --- a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go +++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux // +build linux package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go index 86e76a3d67..368f5b790d 100644 --- a/vendor/github.com/fsnotify/fsnotify/kqueue.go +++ b/vendor/github.com/fsnotify/fsnotify/kqueue.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build freebsd || openbsd || netbsd || dragonfly || darwin // +build freebsd openbsd netbsd dragonfly darwin package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go index 2306c4620b..36cc3845b6 100644 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build freebsd || openbsd || netbsd || dragonfly // +build freebsd openbsd netbsd dragonfly package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go index 870c4d6d18..98cd8476ff 100644 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build darwin // +build darwin package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go index 09436f31d8..c02b75f7c3 100644 --- a/vendor/github.com/fsnotify/fsnotify/windows.go +++ b/vendor/github.com/fsnotify/fsnotify/windows.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build windows // +build windows package fsnotify diff --git a/vendor/github.com/ghodss/yaml/.gitignore b/vendor/github.com/ghodss/yaml/.gitignore new file mode 100644 index 0000000000..e256a31e00 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/.gitignore @@ -0,0 +1,20 @@ +# OSX leaves these everywhere on SMB shares +._* + +# Eclipse files +.classpath +.project +.settings/** + +# Emacs save files +*~ + +# Vim-related files +[._]*.s[a-w][a-z] +[._]s[a-w][a-z] +*.un~ +Session.vim +.netrwhist + +# Go test binaries +*.test diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml new file mode 100644 index 0000000000..0e9d6edc01 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/.travis.yml @@ -0,0 +1,7 @@ +language: go +go: + - 1.3 + - 1.4 +script: + - go test + - go build diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE new file mode 100644 index 0000000000..7805d36de7 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/LICENSE @@ -0,0 +1,50 @@ +The MIT License (MIT) + +Copyright (c) 2014 Sam Ghods + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md new file mode 100644 index 0000000000..0200f75b4d --- /dev/null +++ b/vendor/github.com/ghodss/yaml/README.md @@ -0,0 +1,121 @@ +# YAML marshaling and unmarshaling support for Go + +[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml) + +## Introduction + +A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. + +In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). + +## Compatibility + +This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). + +## Caveats + +**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example: + +``` +BAD: + exampleKey: !!binary gIGC + +GOOD: + exampleKey: gIGC +... and decode the base64 data in your code. +``` + +**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys. 
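One way to follow Caveat #1, sketched with a hypothetical Blob type that is not part of the README: keep the value as plain base64 text (no `!!binary` tag) and decode it in a custom `UnmarshalJSON`, which this library invokes because YAML is routed through `encoding/json`.

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"

	"github.com/ghodss/yaml"
)

// Blob is a hypothetical type whose Data field arrives as plain base64 text.
type Blob struct {
	Data []byte
}

func (b *Blob) UnmarshalJSON(raw []byte) error {
	var wire struct {
		Data string `json:"data"` // base64 text, stored without the !!binary tag
	}
	if err := json.Unmarshal(raw, &wire); err != nil {
		return err
	}
	decoded, err := base64.StdEncoding.DecodeString(wire.Data)
	if err != nil {
		return err
	}
	b.Data = decoded
	return nil
}

func main() {
	var b Blob
	if err := yaml.Unmarshal([]byte("data: gIGC\n"), &b); err != nil {
		fmt.Println("err:", err)
		return
	}
	fmt.Println(b.Data) // [128 129 130]
}
```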
+ +## Installation and usage + +To install, run: + +``` +$ go get github.com/ghodss/yaml +``` + +And import using: + +``` +import "github.com/ghodss/yaml" +``` + +Usage is very similar to the JSON library: + +```go +package main + +import ( + "fmt" + + "github.com/ghodss/yaml" +) + +type Person struct { + Name string `json:"name"` // Affects YAML field names too. + Age int `json:"age"` +} + +func main() { + // Marshal a Person struct to YAML. + p := Person{"John", 30} + y, err := yaml.Marshal(p) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(y)) + /* Output: + age: 30 + name: John + */ + + // Unmarshal the YAML back into a Person struct. + var p2 Person + err = yaml.Unmarshal(y, &p2) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(p2) + /* Output: + {John 30} + */ +} +``` + +`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available: + +```go +package main + +import ( + "fmt" + + "github.com/ghodss/yaml" +) + +func main() { + j := []byte(`{"name": "John", "age": 30}`) + y, err := yaml.JSONToYAML(j) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(y)) + /* Output: + name: John + age: 30 + */ + j2, err := yaml.YAMLToJSON(y) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(j2)) + /* Output: + {"age":30,"name":"John"} + */ +} +``` diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go new file mode 100644 index 0000000000..5860074026 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/fields.go @@ -0,0 +1,501 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package yaml + +import ( + "bytes" + "encoding" + "encoding/json" + "reflect" + "sort" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + if v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } else { + v = reflect.New(v.Type().Elem()) + } + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(json.Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// A field represents a single field found in a struct. 
+type field struct { + name string + nameBytes []byte // []byte(name) + equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent + + tag bool + index []int + typ reflect.Type + omitEmpty bool + quoted bool +} + +func fillField(f field) field { + f.nameBytes = []byte(f.name) + f.equalFold = foldFunc(f.nameBytes) + return f +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from json tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that JSON should recognize for the given type. +// The algorithm is breadth-first search over the set of structs to include - the top struct +// and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" { // unexported + continue + } + tag := sf.Tag.Get("json") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if !isValidTag(name) { + name = "" + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := name != "" + if name == "" { + name = sf.Name + } + fields = append(fields, fillField(field{ + name: name, + tag: tagged, + index: index, + typ: ft, + omitEmpty: opts.Contains("omitempty"), + quoted: opts.Contains("string"), + })) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. 
+ nextCount[ft]++ + if nextCount[ft] == 1 { + next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with JSON tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// JSON tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. 
+ kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. +// +// The letters S and K are special because they map to 3 runes, not just 2: +// * S maps to s and to U+017F 'ſ' Latin small letter long s +// * k maps to K and to U+212A 'K' Kelvin sign +// See http://play.golang.org/p/tTxjOc0OGo +// +// The returned function is specialized for matching against s and +// should only be given s. It's not curried for performance reasons. +func foldFunc(s []byte) func(s, t []byte) bool { + nonLetter := false + special := false // special letter + for _, b := range s { + if b >= utf8.RuneSelf { + return bytes.EqualFold + } + upper := b & caseMask + if upper < 'A' || upper > 'Z' { + nonLetter = true + } else if upper == 'K' || upper == 'S' { + // See above for why these letters are special. + special = true + } + } + if special { + return equalFoldRight + } + if nonLetter { + return asciiEqualFold + } + return simpleLetterEqualFold +} + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. +func equalFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. + tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. +func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} + +// tagOptions is the string following a comma in a struct field's "json" +// tag, or the empty string. It does not include the leading comma. +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. 
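A test-style sketch of the tag helpers defined just below (purely illustrative, and in-package since parseTag and tagOptions are unexported; it would not normally be added to vendored code):

```go
package yaml

import "testing"

// TestParseTagSketch illustrates parseTag and tagOptions.Contains; it is not part of the vendored files.
func TestParseTagSketch(t *testing.T) {
	name, opts := parseTag("name,omitempty,string")
	if name != "name" || !opts.Contains("omitempty") || !opts.Contains("string") || opts.Contains("omit") {
		t.Fatalf("unexpected parse result: %q %q", name, opts)
	}
}
```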
+func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. +func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go new file mode 100644 index 0000000000..4fb4054a8b --- /dev/null +++ b/vendor/github.com/ghodss/yaml/yaml.go @@ -0,0 +1,277 @@ +package yaml + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "strconv" + + "gopkg.in/yaml.v2" +) + +// Marshals the object into JSON then converts JSON to YAML and returns the +// YAML. +func Marshal(o interface{}) ([]byte, error) { + j, err := json.Marshal(o) + if err != nil { + return nil, fmt.Errorf("error marshaling into JSON: %v", err) + } + + y, err := JSONToYAML(j) + if err != nil { + return nil, fmt.Errorf("error converting JSON to YAML: %v", err) + } + + return y, nil +} + +// Converts YAML to JSON then uses JSON to unmarshal into an object. +func Unmarshal(y []byte, o interface{}) error { + vo := reflect.ValueOf(o) + j, err := yamlToJSON(y, &vo) + if err != nil { + return fmt.Errorf("error converting YAML to JSON: %v", err) + } + + err = json.Unmarshal(j, o) + if err != nil { + return fmt.Errorf("error unmarshaling JSON: %v", err) + } + + return nil +} + +// Convert JSON to YAML. +func JSONToYAML(j []byte) ([]byte, error) { + // Convert the JSON to an object. + var jsonObj interface{} + // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the + // Go JSON library doesn't try to pick the right number type (int, float, + // etc.) when unmarshalling to interface{}, it just picks float64 + // universally. go-yaml does go through the effort of picking the right + // number type, so we can preserve number type throughout this process. + err := yaml.Unmarshal(j, &jsonObj) + if err != nil { + return nil, err + } + + // Marshal this object into YAML. + return yaml.Marshal(jsonObj) +} + +// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through +// this method should be a no-op. +// +// Things YAML can do that are not supported by JSON: +// * In YAML you can have binary and null keys in your maps. These are invalid +// in JSON. (int and float keys are converted to strings.) +// * Binary data in YAML with the !!binary tag is not supported. If you want to +// use binary data with this library, encode the data as base64 as usual but do +// not use the !!binary tag in your YAML. This will ensure the original base64 +// encoded data makes it all the way through to the JSON. +func YAMLToJSON(y []byte) ([]byte, error) { + return yamlToJSON(y, nil) +} + +func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) { + // Convert the YAML to an object. + var yamlObj interface{} + err := yaml.Unmarshal(y, &yamlObj) + if err != nil { + return nil, err + } + + // YAML objects are not completely compatible with JSON objects (e.g. you + // can have non-string keys in YAML). 
So, convert the YAML-compatible object + // to a JSON-compatible object, failing with an error if irrecoverable + // incompatibilties happen along the way. + jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget) + if err != nil { + return nil, err + } + + // Convert this object to JSON and return the data. + return json.Marshal(jsonObj) +} + +func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) { + var err error + + // Resolve jsonTarget to a concrete value (i.e. not a pointer or an + // interface). We pass decodingNull as false because we're not actually + // decoding into the value, we're just checking if the ultimate target is a + // string. + if jsonTarget != nil { + ju, tu, pv := indirect(*jsonTarget, false) + // We have a JSON or Text Umarshaler at this level, so we can't be trying + // to decode into a string. + if ju != nil || tu != nil { + jsonTarget = nil + } else { + jsonTarget = &pv + } + } + + // If yamlObj is a number or a boolean, check if jsonTarget is a string - + // if so, coerce. Else return normal. + // If yamlObj is a map or array, find the field that each key is + // unmarshaling to, and when you recurse pass the reflect.Value for that + // field back into this function. + switch typedYAMLObj := yamlObj.(type) { + case map[interface{}]interface{}: + // JSON does not support arbitrary keys in a map, so we must convert + // these keys to strings. + // + // From my reading of go-yaml v2 (specifically the resolve function), + // keys can only have the types string, int, int64, float64, binary + // (unsupported), or null (unsupported). + strMap := make(map[string]interface{}) + for k, v := range typedYAMLObj { + // Resolve the key to a string first. + var keyString string + switch typedKey := k.(type) { + case string: + keyString = typedKey + case int: + keyString = strconv.Itoa(typedKey) + case int64: + // go-yaml will only return an int64 as a key if the system + // architecture is 32-bit and the key's value is between 32-bit + // and 64-bit. Otherwise the key type will simply be int. + keyString = strconv.FormatInt(typedKey, 10) + case float64: + // Stolen from go-yaml to use the same conversion to string as + // the go-yaml library uses to convert float to string when + // Marshaling. + s := strconv.FormatFloat(typedKey, 'g', -1, 32) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + keyString = s + case bool: + if typedKey { + keyString = "true" + } else { + keyString = "false" + } + default: + return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v", + reflect.TypeOf(k), k, v) + } + + // jsonTarget should be a struct or a map. If it's a struct, find + // the field it's going to map to and pass its reflect.Value. If + // it's a map, find the element type of the map and pass the + // reflect.Value created from that type. If it's neither, just pass + // nil - JSON conversion will error for us if it's a real issue. + if jsonTarget != nil { + t := *jsonTarget + if t.Kind() == reflect.Struct { + keyBytes := []byte(keyString) + // Find the field that the JSON library would use. + var f *field + fields := cachedTypeFields(t.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, keyBytes) { + f = ff + break + } + // Do case-insensitive comparison. + if f == nil && ff.equalFold(ff.nameBytes, keyBytes) { + f = ff + } + } + if f != nil { + // Find the reflect.Value of the most preferential + // struct field. 
+ jtf := t.Field(f.index[0]) + strMap[keyString], err = convertToJSONableObject(v, &jtf) + if err != nil { + return nil, err + } + continue + } + } else if t.Kind() == reflect.Map { + // Create a zero value of the map's element type to use as + // the JSON target. + jtv := reflect.Zero(t.Type().Elem()) + strMap[keyString], err = convertToJSONableObject(v, &jtv) + if err != nil { + return nil, err + } + continue + } + } + strMap[keyString], err = convertToJSONableObject(v, nil) + if err != nil { + return nil, err + } + } + return strMap, nil + case []interface{}: + // We need to recurse into arrays in case there are any + // map[interface{}]interface{}'s inside and to convert any + // numbers to strings. + + // If jsonTarget is a slice (which it really should be), find the + // thing it's going to map to. If it's not a slice, just pass nil + // - JSON conversion will error for us if it's a real issue. + var jsonSliceElemValue *reflect.Value + if jsonTarget != nil { + t := *jsonTarget + if t.Kind() == reflect.Slice { + // By default slices point to nil, but we need a reflect.Value + // pointing to a value of the slice type, so we create one here. + ev := reflect.Indirect(reflect.New(t.Type().Elem())) + jsonSliceElemValue = &ev + } + } + + // Make and use a new array. + arr := make([]interface{}, len(typedYAMLObj)) + for i, v := range typedYAMLObj { + arr[i], err = convertToJSONableObject(v, jsonSliceElemValue) + if err != nil { + return nil, err + } + } + return arr, nil + default: + // If the target type is a string and the YAML type is a number, + // convert the YAML type to a string. + if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String { + // Based on my reading of go-yaml, it may return int, int64, + // float64, or uint64. + var s string + switch typedVal := typedYAMLObj.(type) { + case int: + s = strconv.FormatInt(int64(typedVal), 10) + case int64: + s = strconv.FormatInt(typedVal, 10) + case float64: + s = strconv.FormatFloat(typedVal, 'g', -1, 32) + case uint64: + s = strconv.FormatUint(typedVal, 10) + case bool: + if typedVal { + s = "true" + } else { + s = "false" + } + } + if len(s) > 0 { + yamlObj = interface{}(s) + } + } + return yamlObj, nil + } + + return nil, nil +} diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml new file mode 100644 index 0000000000..94ff801df1 --- /dev/null +++ b/vendor/github.com/go-logr/logr/.golangci.yaml @@ -0,0 +1,29 @@ +run: + timeout: 1m + tests: true + +linters: + disable-all: true + enable: + - asciicheck + - deadcode + - errcheck + - forcetypeassert + - gocritic + - gofmt + - goimports + - gosimple + - govet + - ineffassign + - misspell + - revive + - staticcheck + - structcheck + - typecheck + - unused + - varcheck + +issues: + exclude-use-default: false + max-issues-per-linter: 0 + max-same-issues: 10 diff --git a/vendor/github.com/go-logr/logr/CHANGELOG.md b/vendor/github.com/go-logr/logr/CHANGELOG.md new file mode 100644 index 0000000000..c356960046 --- /dev/null +++ b/vendor/github.com/go-logr/logr/CHANGELOG.md @@ -0,0 +1,6 @@ +# CHANGELOG + +## v1.0.0-rc1 + +This is the first logged release. Major changes (including breaking changes) +have occurred since earlier tags. 
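For reviewers skimming this vendored code: the `ghodss/yaml` converter above round-trips YAML through JSON, so field matching is driven by `json` struct tags (resolved via the `cachedTypeFields` and `foldFunc` machinery shown earlier). Below is a minimal, illustrative sketch of how downstream code typically calls it; the `Config` type and its fields are hypothetical, and only `yaml.Marshal` and `yaml.Unmarshal` come from the vendored package.

```
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

// Config is a made-up example type. The json tags (not yaml tags) decide
// the key names, because Unmarshal converts YAML to JSON before decoding.
type Config struct {
	Name     string `json:"name"`
	Replicas int    `json:"replicas,omitempty"`
}

func main() {
	in := []byte("name: demo\nreplicas: 3\n")

	var c Config
	if err := yaml.Unmarshal(in, &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c) // {Name:demo Replicas:3}

	out, err := yaml.Marshal(c)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // keys come back under the json tag names
}
```

Because the package always goes through JSON, yaml-specific struct tags are ignored; types that already carry `json` tags (as Kubernetes-style API types do) work unchanged.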
diff --git a/vendor/github.com/go-logr/logr/CONTRIBUTING.md b/vendor/github.com/go-logr/logr/CONTRIBUTING.md new file mode 100644 index 0000000000..5d37e294c5 --- /dev/null +++ b/vendor/github.com/go-logr/logr/CONTRIBUTING.md @@ -0,0 +1,17 @@ +# Contributing + +Logr is open to pull-requests, provided they fit within the intended scope of +the project. Specifically, this library aims to be VERY small and minimalist, +with no external dependencies. + +## Compatibility + +This project intends to follow [semantic versioning](http://semver.org) and +is very strict about compatibility. Any proposed changes MUST follow those +rules. + +## Performance + +As a logging library, logr must be as light-weight as possible. Any proposed +code change must include results of running the [benchmark](./benchmark) +before and after the change. diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index e9b5520a1c..ad825f5f0a 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -1,112 +1,182 @@ -# A more minimal logging API for Go +# A minimal logging API for Go -Before you consider this package, please read [this blog post by the -inimitable Dave Cheney][warning-makes-no-sense]. I really appreciate what -he has to say, and it largely aligns with my own experiences. Too many -choices of levels means inconsistent logs. +[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) + +logr offers an(other) opinion on how Go programs and libraries can do logging +without becoming coupled to a particular logging implementation. This is not +an implementation of logging - it is an API. In fact it is two APIs with two +different sets of users. + +The `Logger` type is intended for application and library authors. It provides +a relatively small API which can be used everywhere you want to emit logs. It +defers the actual act of writing logs (to files, to stdout, or whatever) to the +`LogSink` interface. + +The `LogSink` interface is intended for logging library implementers. It is a +pure interface which can be implemented by logging frameworks to provide the actual logging +functionality. + +This decoupling allows application and library developers to write code in +terms of `logr.Logger` (which has very low dependency fan-out) while the +implementation of logging is managed "up stack" (e.g. in or near `main()`.) +Application developers can then switch out implementations as necessary. + +Many people assert that libraries should not be logging, and as such efforts +like this are pointless. Those people are welcome to convince the authors of +the tens-of-thousands of libraries that *DO* write logs that they are all +wrong. In the meantime, logr takes a more practical approach. + +## Typical usage + +Somewhere, early in an application's life, it will make a decision about which +logging library (implementation) it actually wants to use. Something like: + +``` + func main() { + // ... other setup code ... + + // Create the "root" logger. We have chosen the "logimpl" implementation, + // which takes some initial parameters and returns a logr.Logger. + logger := logimpl.New(param1, param2) + + // ... other setup code ... +``` + +Most apps will call into other libraries, create structures to govern the flow, +etc. The `logr.Logger` object can be passed to these other libraries, stored +in structs, or even used as a package-global variable, if needed. 
For example: + +``` + app := createTheAppObject(logger) + app.Run() +``` + +Outside of this early setup, no other packages need to know about the choice of +implementation. They write logs in terms of the `logr.Logger` that they +received: -This package offers a purely abstract interface, based on these ideas but with -a few twists. Code can depend on just this interface and have the actual -logging implementation be injected from callers. Ideally only `main()` knows -what logging implementation is being used. +``` + type appObject struct { + // ... other fields ... + logger logr.Logger + // ... other fields ... + } -# Differences from Dave's ideas + func (app *appObject) Run() { + app.logger.Info("starting up", "timestamp", time.Now()) + + // ... app code ... +``` + +## Background + +If the Go standard library had defined an interface for logging, this project +probably would not be needed. Alas, here we are. + +### Inspiration + +Before you consider this package, please read [this blog post by the +inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what +he has to say, and it largely aligns with our own experiences. + +### Differences from Dave's ideas The main differences are: -1) Dave basically proposes doing away with the notion of a logging API in favor -of `fmt.Printf()`. I disagree, especially when you consider things like output -locations, timestamps, file and line decorations, and structured logging. I -restrict the API to just 2 types of logs: info and error. +1. Dave basically proposes doing away with the notion of a logging API in favor +of `fmt.Printf()`. We disagree, especially when you consider things like output +locations, timestamps, file and line decorations, and structured logging. This +package restricts the logging API to just 2 types of logs: info and error. Info logs are things you want to tell the user which are not errors. Error logs are, well, errors. If your code receives an `error` from a subordinate function call and is logging that `error` *and not returning it*, use error logs. -2) Verbosity-levels on info logs. This gives developers a chance to indicate +2. Verbosity-levels on info logs. This gives developers a chance to indicate arbitrary grades of importance for info logs, without assigning names with -semantic meaning such as "warning", "trace", and "debug". Superficially this +semantic meaning such as "warning", "trace", and "debug." Superficially this may feel very similar, but the primary difference is the lack of semantics. Because verbosity is a numerical value, it's safe to assume that an app running with higher verbosity means more (and less important) logs will be generated. -This is a BETA grade API. 
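To make the two call types and the numeric verbosity described above concrete, here is a small editorial sketch (it is not part of the upstream README). The names `Object`, `apply`, and `reconcile` are invented for illustration; `Info`, `Error`, `V`, and `Discard` belong to the logr API vendored by this patch.

```
package main

import (
	"errors"

	"github.com/go-logr/logr"
)

// Object and apply are made-up stand-ins for a real resource and real work.
type Object struct{ Name string }

func apply(Object) error { return errors.New("not implemented") }

func reconcile(logger logr.Logger, obj Object) error {
	// Info: a non-error message the user normally wants to see.
	logger.Info("reconciling object", "name", obj.Name)

	// V(1): only emitted when the configured sink enables verbosity level 1.
	logger.V(1).Info("fetched current state", "object", obj)

	if err := apply(obj); err != nil {
		// Error: always logged, regardless of the current verbosity.
		logger.Error(err, "failed to apply object", "name", obj.Name)
		return err
	}
	return nil
}

func main() {
	// logr.Discard() yields a no-op Logger, convenient for examples and tests.
	_ = reconcile(logr.Discard(), Object{Name: "demo"})
}
```

Whichever sink the application wires in at startup (zapr, klogr, stdr, and so on) then decides which V-levels are actually written out.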
+## Implementations (non-exhaustive) There are implementations for the following logging libraries: +- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr) - **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr) -- **k8s.io/klog**: [klogr](https://git.k8s.io/klog/klogr) +- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr) - **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr) -- **log** (the Go standard library logger): - [stdr](https://github.com/go-logr/stdr) +- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr) - **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr) - **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend) - **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr) +- **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr) -# FAQ +## FAQ -## Conceptual +### Conceptual -## Why structured logging? +#### Why structured logging? -- **Structured logs are more easily queriable**: Since you've got +- **Structured logs are more easily queryable**: Since you've got key-value pairs, it's much easier to query your structured logs for particular values by filtering on the contents of a particular key -- think searching request logs for error codes, Kubernetes reconcilers for - the name and namespace of the reconciled object, etc + the name and namespace of the reconciled object, etc. -- **Structured logging makes it easier to have cross-referencable logs**: +- **Structured logging makes it easier to have cross-referenceable logs**: Similarly to searchability, if you maintain conventions around your keys, it becomes easy to gather all log lines related to a particular concept. - + - **Structured logs allow better dimensions of filtering**: if you have structure to your logs, you've got more precise control over how much information is logged -- you might choose in a particular configuration to log certain keys but not others, only log lines where a certain key - matches a certain value, etc, instead of just having v-levels and names + matches a certain value, etc., instead of just having v-levels and names to key off of. - **Structured logs better represent structured data**: sometimes, the data that you want to log is inherently structured (think tuple-link - objects). Structured logs allow you to preserve that structure when + objects.) Structured logs allow you to preserve that structure when outputting. -## Why V-levels? +#### Why V-levels? **V-levels give operators an easy way to control the chattiness of log operations**. V-levels provide a way for a given package to distinguish the relative importance or verbosity of a given log message. Then, if a particular logger or package is logging too many messages, the user -of the package can simply change the v-levels for that library. +of the package can simply change the v-levels for that library. -## Why not more named levels, like Warning? +#### Why not named levels, like Info/Warning/Error? Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences from Dave's ideas](#differences-from-daves-ideas). -## Why not allow format strings, too? +#### Why not allow format strings, too? 
**Format strings negate many of the benefits of structured logs**: - They're not easily searchable without resorting to fuzzy searching, - regular expressions, etc + regular expressions, etc. - They don't store structured data well, since contents are flattened into - a string + a string. -- They're not cross-referencable +- They're not cross-referenceable. -- They don't compress easily, since the message is not constant +- They don't compress easily, since the message is not constant. -(unless you turn positional parameters into key-value pairs with numerical +(Unless you turn positional parameters into key-value pairs with numerical keys, at which point you've gotten key-value logging with meaningless -keys) +keys.) -## Practical +### Practical -## Why key-value pairs, and not a map? +#### Why key-value pairs, and not a map? Key-value pairs are *much* easier to optimize, especially around allocations. Zap (a structured logger that inspired logr's interface) has @@ -117,26 +187,26 @@ While the interface ends up being a little less obvious, you get potentially better performance, plus avoid making users type `map[string]string{}` every time they want to log. -## What if my V-levels differ between libraries? +#### What if my V-levels differ between libraries? That's fine. Control your V-levels on a per-logger basis, and use the -`WithName` function to pass different loggers to different libraries. +`WithName` method to pass different loggers to different libraries. Generally, you should take care to ensure that you have relatively consistent V-levels within a given logger, however, as this makes deciding on what verbosity of logs to request easier. -## But I *really* want to use a format string! +#### But I really want to use a format string! That's not actually a question. Assuming your question is "how do I convert my mental model of logging with format strings to logging with constant messages": -1. figure out what the error actually is, as you'd write in a TL;DR style, - and use that as a message +1. Figure out what the error actually is, as you'd write in a TL;DR style, + and use that as a message. 2. For every place you'd write a format specifier, look to the word before - it, and add that as a key value pair + it, and add that as a key value pair. For instance, consider the following examples (all taken from spots in the Kubernetes codebase): @@ -150,34 +220,59 @@ Kubernetes codebase): response when requesting url", "attempt", retries, "after seconds", seconds, "url", url)` -If you *really* must use a format string, place it as a key value, and -call `fmt.Sprintf` yourself -- for instance, `log.Printf("unable to +If you *really* must use a format string, use it in a key's value, and +call `fmt.Sprintf` yourself. For instance: `log.Printf("unable to reflect over type %T")` becomes `logger.Info("unable to reflect over type", "type", fmt.Sprintf("%T"))`. In general though, the cases where this is necessary should be few and far between. -## How do I choose my V-levels? +#### How do I choose my V-levels? This is basically the only hard constraint: increase V-levels to denote more verbose or more debug-y logs. Otherwise, you can start out with `0` as "you always want to see this", `1` as "common logging that you might *possibly* want to turn off", and -`10` as "I would like to performance-test your log collection stack". +`10` as "I would like to performance-test your log collection stack." 
Then gradually choose levels in between as you need them, working your way down from 10 (for debug and trace style logs) and up from 1 (for chattier -info-type logs). +info-type logs.) + +#### How do I choose my keys? -## How do I choose my keys +Keys are fairly flexible, and can hold more or less any string +value. For best compatibility with implementations and consistency +with existing code in other projects, there are a few conventions you +should consider. -- make your keys human-readable -- constant keys are generally a good idea -- be consistent across your codebase -- keys should naturally match parts of the message string +- Make your keys human-readable. +- Constant keys are generally a good idea. +- Be consistent across your codebase. +- Keys should naturally match parts of the message string. +- Use lower case for simple keys and + [lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for + more complex ones. Kubernetes is one example of a project that has + [adopted that + convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments). While key names are mostly unrestricted (and spaces are acceptable), it's generally a good idea to stick to printable ascii characters, or at least match the general character set of your log lines. +#### Why should keys be constant values? + +The point of structured logging is to make later log processing easier. Your +keys are, effectively, the schema of each log message. If you use different +keys across instances of the same log line, you will make your structured logs +much harder to use. `Sprintf()` is for values, not for keys! + +#### Why is this not a pure interface? + +The Logger type is implemented as a struct in order to allow the Go compiler to +optimize things like high-V `Info` logs that are not triggered. Not all of +these implementations are implemented yet, but this structure was suggested as +a way to ensure they *can* be implemented. All of the real work is behind the +`LogSink` interface. + [warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go index 2bafb13d15..9d92a38f1d 100644 --- a/vendor/github.com/go-logr/logr/discard.go +++ b/vendor/github.com/go-logr/logr/discard.go @@ -16,36 +16,39 @@ limitations under the License. package logr -// Discard returns a valid Logger that discards all messages logged to it. -// It can be used whenever the caller is not interested in the logs. +// Discard returns a Logger that discards all messages logged to it. It can be +// used whenever the caller is not interested in the logs. Logger instances +// produced by this function always compare as equal. func Discard() Logger { - return DiscardLogger{} + return Logger{ + level: 0, + sink: discardLogSink{}, + } } -// DiscardLogger is a Logger that discards all messages. -type DiscardLogger struct{} +// discardLogSink is a LogSink that discards all messages. 
+type discardLogSink struct{} -func (l DiscardLogger) Enabled() bool { - return false +// Verify that it actually implements the interface +var _ LogSink = discardLogSink{} + +func (l discardLogSink) Init(RuntimeInfo) { } -func (l DiscardLogger) Info(msg string, keysAndValues ...interface{}) { +func (l discardLogSink) Enabled(int) bool { + return false } -func (l DiscardLogger) Error(err error, msg string, keysAndValues ...interface{}) { +func (l discardLogSink) Info(int, string, ...interface{}) { } -func (l DiscardLogger) V(level int) Logger { - return l +func (l discardLogSink) Error(error, string, ...interface{}) { } -func (l DiscardLogger) WithValues(keysAndValues ...interface{}) Logger { +func (l discardLogSink) WithValues(...interface{}) LogSink { return l } -func (l DiscardLogger) WithName(name string) Logger { +func (l discardLogSink) WithName(string) LogSink { return l } - -// Verify that it actually implements the interface -var _ Logger = DiscardLogger{} diff --git a/vendor/github.com/go-logr/logr/go.mod b/vendor/github.com/go-logr/logr/go.mod index 591884e91f..7baec9b570 100644 --- a/vendor/github.com/go-logr/logr/go.mod +++ b/vendor/github.com/go-logr/logr/go.mod @@ -1,3 +1,3 @@ module github.com/go-logr/logr -go 1.14 +go 1.16 diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go index 842428bd3a..c05482a203 100644 --- a/vendor/github.com/go-logr/logr/logr.go +++ b/vendor/github.com/go-logr/logr/logr.go @@ -16,83 +16,104 @@ limitations under the License. // This design derives from Dave Cheney's blog: // http://dave.cheney.net/2015/11/05/lets-talk-about-logging -// -// This is a BETA grade API. Until there is a significant 2nd implementation, -// I don't really know how it will change. -// Package logr defines abstract interfaces for logging. Packages can depend on -// these interfaces and callers can implement logging in whatever way is -// appropriate. +// Package logr defines a general-purpose logging API and abstract interfaces +// to back that API. Packages in the Go ecosystem can depend on this package, +// while callers can implement logging with whatever backend is appropriate. // // Usage // -// Logging is done using a Logger. Loggers can have name prefixes and named -// values attached, so that all log messages logged with that Logger have some -// base context associated. +// Logging is done using a Logger instance. Logger is a concrete type with +// methods, which defers the actual logging to a LogSink interface. The main +// methods of Logger are Info() and Error(). Arguments to Info() and Error() +// are key/value pairs rather than printf-style formatted strings, emphasizing +// "structured logging". // -// The term "key" is used to refer to the name associated with a particular -// value, to disambiguate it from the general Logger name. +// With Go's standard log package, we might write: +// log.Printf("setting target value %s", targetValue) // -// For instance, suppose we're trying to reconcile the state of an object, and -// we want to log that we've made some decision. +// With logr's structured logging, we'd write: +// logger.Info("setting target", "value", targetValue) // -// With the traditional log package, we might write: -// log.Printf("decided to set field foo to value %q for object %s/%s", -// targetValue, object.Namespace, object.Name) +// Errors are much the same. 
Instead of: +// log.Printf("failed to open the pod bay door for user %s: %v", user, err) // -// With logr's structured logging, we'd write: -// // elsewhere in the file, set up the logger to log with the prefix of -// // "reconcilers", and the named value target-type=Foo, for extra context. -// log := mainLogger.WithName("reconcilers").WithValues("target-type", "Foo") +// We'd write: +// logger.Error(err, "failed to open the pod bay door", "user", user) // -// // later on... -// log.Info("setting foo on object", "value", targetValue, "object", object) +// Info() and Error() are very similar, but they are separate methods so that +// LogSink implementations can choose to do things like attach additional +// information (such as stack traces) on calls to Error(). Error() messages are +// always logged, regardless of the current verbosity. If there is no error +// instance available, passing nil is valid. +// +// Verbosity +// +// Often we want to log information only when the application in "verbose +// mode". To write log lines that are more verbose, Logger has a V() method. +// The higher the V-level of a log line, the less critical it is considered. +// Log-lines with V-levels that are not enabled (as per the LogSink) will not +// be written. Level V(0) is the default, and logger.V(0).Info() has the same +// meaning as logger.Info(). Negative V-levels have the same meaning as V(0). +// Error messages do not have a verbosity level and are always logged. +// +// Where we might have written: +// if flVerbose >= 2 { +// log.Printf("an unusual thing happened") +// } +// +// We can write: +// logger.V(2).Info("an unusual thing happened") +// +// Logger Names +// +// Logger instances can have name strings so that all messages logged through +// that instance have additional context. For example, you might want to add +// a subsystem name: // -// Depending on our logging implementation, we could then make logging decisions -// based on field values (like only logging such events for objects in a certain -// namespace), or copy the structured information into a structured log store. +// logger.WithName("compactor").Info("started", "time", time.Now()) // -// For logging errors, Logger has a method called Error. Suppose we wanted to -// log an error while reconciling. With the traditional log package, we might -// write: -// log.Errorf("unable to reconcile object %s/%s: %v", object.Namespace, object.Name, err) +// The WithName() method returns a new Logger, which can be passed to +// constructors or other functions for further use. Repeated use of WithName() +// will accumulate name "segments". These name segments will be joined in some +// way by the LogSink implementation. It is strongly recommended that name +// segments contain simple identifiers (letters, digits, and hyphen), and do +// not contain characters that could muddle the log output or confuse the +// joining operation (e.g. whitespace, commas, periods, slashes, brackets, +// quotes, etc). // -// With logr, we'd instead write: -// // assuming the above setup for log -// log.Error(err, "unable to reconcile object", "object", object) +// Saved Values // -// This functions similarly to: -// log.Info("unable to reconcile object", "error", err, "object", object) +// Logger instances can store any number of key/value pairs, which will be +// logged alongside all messages logged through that instance. 
For example, +// you might want to create a Logger instance per managed object: // -// However, it ensures that a standard key for the error value ("error") is used -// across all error logging. Furthermore, certain implementations may choose to -// attach additional information (such as stack traces) on calls to Error, so -// it's preferred to use Error to log errors. +// With the standard log package, we might write: +// log.Printf("decided to set field foo to value %q for object %s/%s", +// targetValue, object.Namespace, object.Name) // -// Parts of a log line +// With logr we'd write: +// // Elsewhere: set up the logger to log the object name. +// obj.logger = mainLogger.WithValues( +// "name", obj.name, "namespace", obj.namespace) // -// Each log message from a Logger has four types of context: -// logger name, log verbosity, log message, and the named values. +// // later on... +// obj.logger.Info("setting foo", "value", targetValue) // -// The Logger name consists of a series of name "segments" added by successive -// calls to WithName. These name segments will be joined in some way by the -// underlying implementation. It is strongly recommended that name segments -// contain simple identifiers (letters, digits, and hyphen), and do not contain -// characters that could muddle the log output or confuse the joining operation -// (e.g. whitespace, commas, periods, slashes, brackets, quotes, etc). +// Best Practices // -// Log verbosity represents how little a log matters. Level zero, the default, -// matters most. Increasing levels matter less and less. Try to avoid lots of -// different verbosity levels, and instead provide useful keys, logger names, -// and log messages for users to filter on. It's illegal to pass a log level -// below zero. +// Logger has very few hard rules, with the goal that LogSink implementations +// might have a lot of freedom to differentiate. There are, however, some +// things to consider. // // The log message consists of a constant message attached to the log line. // This should generally be a simple description of what's occurring, and should -// never be a format string. +// never be a format string. Variable information can then be attached using +// named values. // -// Variable information can then be attached using named values (key/value -// pairs). Keys are arbitrary strings, while values may be any Go value. +// Keys are arbitrary strings, but should generally be constant values. Values +// may be any Go value, but how the value is formatted is determined by the +// LogSink implementation. // // Key Naming Conventions // @@ -102,6 +123,7 @@ limitations under the License. // * be constant (not dependent on input data) // * contain only printable characters // * not contain whitespace or punctuation +// * use lower case for simple keys and lowerCamelCase for more complex ones // // These guidelines help ensure that log data is processed properly regardless // of the log implementation. For example, log implementations will try to @@ -110,21 +132,22 @@ limitations under the License. // While users are generally free to use key names of their choice, it's // generally best to avoid using the following keys, as they're frequently used // by implementations: -// -// * `"caller"`: the calling information (file/line) of a particular log line. -// * `"error"`: the underlying error value in the `Error` method. -// * `"level"`: the log level. -// * `"logger"`: the name of the associated logger. -// * `"msg"`: the log message. 
-// * `"stacktrace"`: the stack trace associated with a particular log line or -// error (often from the `Error` message). -// * `"ts"`: the timestamp for a log line. +// * "caller": the calling information (file/line) of a particular log line +// * "error": the underlying error value in the `Error` method +// * "level": the log level +// * "logger": the name of the associated logger +// * "msg": the log message +// * "stacktrace": the stack trace associated with a particular log line or +// error (often from the `Error` message) +// * "ts": the timestamp for a log line // // Implementations are encouraged to make use of these keys to represent the // above concepts, when necessary (for example, in a pure-JSON output form, it // would be necessary to represent at least message and timestamp as ordinary // named values). // +// Break Glass +// // Implementations may choose to give callers access to the underlying // logging implementation. The recommended pattern for this is: // // Underlier exposes access to the underlying logging implementation. @@ -134,81 +157,222 @@ limitations under the License. // type Underlier interface { // GetUnderlying() // } +// +// Logger grants access to the sink to enable type assertions like this: +// func DoSomethingWithImpl(log logr.Logger) { +// if underlier, ok := log.GetSink()(impl.Underlier) { +// implLogger := underlier.GetUnderlying() +// ... +// } +// } +// +// Custom `With*` functions can be implemented by copying the complete +// Logger struct and replacing the sink in the copy: +// // WithFooBar changes the foobar parameter in the log sink and returns a +// // new logger with that modified sink. It does nothing for loggers where +// // the sink doesn't support that parameter. +// func WithFoobar(log logr.Logger, foobar int) logr.Logger { +// if foobarLogSink, ok := log.GetSink()(FoobarSink); ok { +// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) +// } +// return log +// } +// +// Don't use New to construct a new Logger with a LogSink retrieved from an +// existing Logger. Source code attribution might not work correctly and +// unexported fields in Logger get lost. +// +// Beware that the same LogSink instance may be shared by different logger +// instances. Calling functions that modify the LogSink will affect all of +// those. package logr import ( "context" ) -// TODO: consider adding back in format strings if they're really needed -// TODO: consider other bits of zap/zapcore functionality like ObjectMarshaller (for arbitrary objects) -// TODO: consider other bits of glog functionality like Flush, OutputStats +// New returns a new Logger instance. This is primarily used by libraries +// implementing LogSink, rather than end users. +func New(sink LogSink) Logger { + logger := Logger{} + logger.setSink(sink) + sink.Init(runtimeInfo) + return logger +} -// Logger represents the ability to log messages, both errors and not. -type Logger interface { - // Enabled tests whether this Logger is enabled. For example, commandline - // flags might be used to set the logging verbosity and disable some info - // logs. - Enabled() bool +// setSink stores the sink and updates any related fields. It mutates the +// logger and thus is only safe to use for loggers that are not currently being +// used concurrently. +func (l *Logger) setSink(sink LogSink) { + l.sink = sink +} - // Info logs a non-error message with the given key/value pairs as context. - // - // The msg argument should be used to add some constant description to - // the log line. 
The key/value pairs can then be used to add additional - // variable information. The key/value pairs should alternate string - // keys and arbitrary values. - Info(msg string, keysAndValues ...interface{}) - - // Error logs an error, with the given message and key/value pairs as context. - // It functions similarly to calling Info with the "error" named value, but may - // have unique behavior, and should be preferred for logging errors (see the - // package documentations for more information). - // - // The msg field should be used to add context to any underlying error, - // while the err field should be used to attach the actual error that - // triggered this log line, if present. - Error(err error, msg string, keysAndValues ...interface{}) +// GetSink returns the stored sink. +func (l Logger) GetSink() LogSink { + return l.sink +} + +// WithSink returns a copy of the logger with the new sink. +func (l Logger) WithSink(sink LogSink) Logger { + l.setSink(sink) + return l +} + +// Logger is an interface to an abstract logging implementation. This is a +// concrete type for performance reasons, but all the real work is passed on to +// a LogSink. Implementations of LogSink should provide their own constructors +// that return Logger, not LogSink. +// +// The underlying sink can be accessed through GetSink and be modified through +// WithSink. This enables the implementation of custom extensions (see "Break +// Glass" in the package documentation). Normally the sink should be used only +// indirectly. +type Logger struct { + sink LogSink + level int +} + +// Enabled tests whether this Logger is enabled. For example, commandline +// flags might be used to set the logging verbosity and disable some info logs. +func (l Logger) Enabled() bool { + return l.sink.Enabled(l.level) +} + +// Info logs a non-error message with the given key/value pairs as context. +// +// The msg argument should be used to add some constant description to the log +// line. The key/value pairs can then be used to add additional variable +// information. The key/value pairs must alternate string keys and arbitrary +// values. +func (l Logger) Info(msg string, keysAndValues ...interface{}) { + if l.Enabled() { + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + withHelper.GetCallStackHelper()() + } + l.sink.Info(l.level, msg, keysAndValues...) + } +} + +// Error logs an error, with the given message and key/value pairs as context. +// It functions similarly to Info, but may have unique behavior, and should be +// preferred for logging errors (see the package documentations for more +// information). The log message will always be emitted, regardless of +// verbosity level. +// +// The msg argument should be used to add context to any underlying error, +// while the err argument should be used to attach the actual error that +// triggered this log line, if present. The err parameter is optional +// and nil may be passed instead of an error instance. +func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + withHelper.GetCallStackHelper()() + } + l.sink.Error(err, msg, keysAndValues...) +} + +// V returns a new Logger instance for a specific verbosity level, relative to +// this Logger. In other words, V-levels are additive. A higher verbosity +// level means a log message is less important. Negative V-levels are treated +// as 0. 
+func (l Logger) V(level int) Logger { + if level < 0 { + level = 0 + } + l.level += level + return l +} + +// WithValues returns a new Logger instance with additional key/value pairs. +// See Info for documentation on how key/value pairs work. +func (l Logger) WithValues(keysAndValues ...interface{}) Logger { + l.setSink(l.sink.WithValues(keysAndValues...)) + return l +} - // V returns an Logger value for a specific verbosity level, relative to - // this Logger. In other words, V values are additive. V higher verbosity - // level means a log message is less important. It's illegal to pass a log - // level less than zero. - V(level int) Logger - - // WithValues adds some key-value pairs of context to a logger. - // See Info for documentation on how key/value pairs work. - WithValues(keysAndValues ...interface{}) Logger - - // WithName adds a new element to the logger's name. - // Successive calls with WithName continue to append - // suffixes to the logger's name. It's strongly recommended - // that name segments contain only letters, digits, and hyphens - // (see the package documentation for more information). - WithName(name string) Logger +// WithName returns a new Logger instance with the specified name element added +// to the Logger's name. Successive calls with WithName append additional +// suffixes to the Logger's name. It's strongly recommended that name segments +// contain only letters, digits, and hyphens (see the package documentation for +// more information). +func (l Logger) WithName(name string) Logger { + l.setSink(l.sink.WithName(name)) + return l } -// InfoLogger provides compatibility with code that relies on the v0.1.0 -// interface. +// WithCallDepth returns a Logger instance that offsets the call stack by the +// specified number of frames when logging call site information, if possible. +// This is useful for users who have helper functions between the "real" call +// site and the actual calls to Logger methods. If depth is 0 the attribution +// should be to the direct caller of this function. If depth is 1 the +// attribution should skip 1 call frame, and so on. Successive calls to this +// are additive. +// +// If the underlying log implementation supports a WithCallDepth(int) method, +// it will be called and the result returned. If the implementation does not +// support CallDepthLogSink, the original Logger will be returned. +// +// To skip one level, WithCallStackHelper() should be used instead of +// WithCallDepth(1) because it works with implementions that support the +// CallDepthLogSink and/or CallStackHelperLogSink interfaces. +func (l Logger) WithCallDepth(depth int) Logger { + if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { + l.setSink(withCallDepth.WithCallDepth(depth)) + } + return l +} + +// WithCallStackHelper returns a new Logger instance that skips the direct +// caller when logging call site information, if possible. This is useful for +// users who have helper functions between the "real" call site and the actual +// calls to Logger methods and want to support loggers which depend on marking +// each individual helper function, like loggers based on testing.T. +// +// In addition to using that new logger instance, callers also must call the +// returned function. // -// Deprecated: InfoLogger is an artifact of early versions of this API. New -// users should never use it and existing users should use Logger instead. This -// will be removed in a future release. 
-type InfoLogger = Logger +// If the underlying log implementation supports a WithCallDepth(int) method, +// WithCallDepth(1) will be called to produce a new logger. If it supports a +// WithCallStackHelper() method, that will be also called. If the +// implementation does not support either of these, the original Logger will be +// returned. +func (l Logger) WithCallStackHelper() (func(), Logger) { + var helper func() + if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { + l.setSink(withCallDepth.WithCallDepth(1)) + } + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + helper = withHelper.GetCallStackHelper() + } else { + helper = func() {} + } + return helper, l +} +// contextKey is how we find Loggers in a context.Context. type contextKey struct{} -// FromContext returns a Logger constructed from ctx or nil if no -// logger details are found. -func FromContext(ctx context.Context) Logger { +// FromContext returns a Logger from ctx or an error if no Logger is found. +func FromContext(ctx context.Context) (Logger, error) { if v, ok := ctx.Value(contextKey{}).(Logger); ok { - return v + return v, nil } - return nil + return Logger{}, notFoundError{} } -// FromContextOrDiscard returns a Logger constructed from ctx or a Logger -// that discards all messages if no logger details are found. +// notFoundError exists to carry an IsNotFound method. +type notFoundError struct{} + +func (notFoundError) Error() string { + return "no logr.Logger was present" +} + +func (notFoundError) IsNotFound() bool { + return true +} + +// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this +// returns a Logger that discards all log messages. func FromContextOrDiscard(ctx context.Context) Logger { if v, ok := ctx.Value(contextKey{}).(Logger); ok { return v @@ -217,12 +381,59 @@ func FromContextOrDiscard(ctx context.Context) Logger { return Discard() } -// NewContext returns a new context derived from ctx that embeds the Logger. -func NewContext(ctx context.Context, l Logger) context.Context { - return context.WithValue(ctx, contextKey{}, l) +// NewContext returns a new Context, derived from ctx, which carries the +// provided Logger. +func NewContext(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) } -// CallDepthLogger represents a Logger that knows how to climb the call stack +// RuntimeInfo holds information that the logr "core" library knows which +// LogSinks might want to know. +type RuntimeInfo struct { + // CallDepth is the number of call frames the logr library adds between the + // end-user and the LogSink. LogSink implementations which choose to print + // the original logging site (e.g. file & line) should climb this many + // additional frames to find it. + CallDepth int +} + +// runtimeInfo is a static global. It must not be changed at run time. +var runtimeInfo = RuntimeInfo{ + CallDepth: 1, +} + +// LogSink represents a logging implementation. End-users will generally not +// interact with this type. +type LogSink interface { + // Init receives optional information about the logr library for LogSink + // implementations that need it. + Init(info RuntimeInfo) + + // Enabled tests whether this LogSink is enabled at the specified V-level. + // For example, commandline flags might be used to set the logging + // verbosity and disable some info logs. + Enabled(level int) bool + + // Info logs a non-error message with the given key/value pairs as context. 
+ // The level argument is provided for optional logging. This method will + // only be called when Enabled(level) is true. See Logger.Info for more + // details. + Info(level int, msg string, keysAndValues ...interface{}) + + // Error logs an error, with the given message and key/value pairs as + // context. See Logger.Error for more details. + Error(err error, msg string, keysAndValues ...interface{}) + + // WithValues returns a new LogSink with additional key/value pairs. See + // Logger.WithValues for more details. + WithValues(keysAndValues ...interface{}) LogSink + + // WithName returns a new LogSink with the specified name appended. See + // Logger.WithName for more details. + WithName(name string) LogSink +} + +// CallDepthLogSink represents a Logger that knows how to climb the call stack // to identify the original call site and can offset the depth by a specified // number of frames. This is useful for users who have helper functions // between the "real" call site and the actual calls to Logger methods. @@ -232,35 +443,59 @@ func NewContext(ctx context.Context, l Logger) context.Context { // // This is an optional interface and implementations are not required to // support it. -type CallDepthLogger interface { - Logger - - // WithCallDepth returns a Logger that will offset the call stack by the - // specified number of frames when logging call site information. If depth - // is 0 the attribution should be to the direct caller of this method. If - // depth is 1 the attribution should skip 1 call frame, and so on. +type CallDepthLogSink interface { + // WithCallDepth returns a LogSink that will offset the call + // stack by the specified number of frames when logging call + // site information. + // + // If depth is 0, the LogSink should skip exactly the number + // of call frames defined in RuntimeInfo.CallDepth when Info + // or Error are called, i.e. the attribution should be to the + // direct caller of Logger.Info or Logger.Error. + // + // If depth is 1 the attribution should skip 1 call frame, and so on. // Successive calls to this are additive. - WithCallDepth(depth int) Logger + WithCallDepth(depth int) LogSink } -// WithCallDepth returns a Logger that will offset the call stack by the -// specified number of frames when logging call site information, if possible. -// This is useful for users who have helper functions between the "real" call -// site and the actual calls to Logger methods. If depth is 0 the attribution -// should be to the direct caller of this function. If depth is 1 the -// attribution should skip 1 call frame, and so on. Successive calls to this -// are additive. +// CallStackHelperLogSink represents a Logger that knows how to climb +// the call stack to identify the original call site and can skip +// intermediate helper functions if they mark themselves as +// helper. Go's testing package uses that approach. // -// If the underlying log implementation supports the CallDepthLogger interface, -// the WithCallDepth method will be called and the result returned. If the -// implementation does not support CallDepthLogger, the original Logger will be -// returned. +// This is useful for users who have helper functions between the +// "real" call site and the actual calls to Logger methods. +// Implementations that log information about the call site (such as +// file, function, or line) would otherwise log information about the +// intermediate helper functions. 
// -// Callers which care about whether this was supported or not should test for -// CallDepthLogger support themselves. -func WithCallDepth(logger Logger, depth int) Logger { - if decorator, ok := logger.(CallDepthLogger); ok { - return decorator.WithCallDepth(depth) - } - return logger +// This is an optional interface and implementations are not required +// to support it. Implementations that choose to support this must not +// simply implement it as WithCallDepth(1), because +// Logger.WithCallStackHelper will call both methods if they are +// present. This should only be implemented for LogSinks that actually +// need it, as with testing.T. +type CallStackHelperLogSink interface { + // GetCallStackHelper returns a function that must be called + // to mark the direct caller as helper function when logging + // call site information. + GetCallStackHelper() func() +} + +// Marshaler is an optional interface that logged values may choose to +// implement. Loggers with structured output, such as JSON, should +// log the object return by the MarshalLog method instead of the +// original value. +type Marshaler interface { + // MarshalLog can be used to: + // - ensure that structs are not logged as strings when the original + // value has a String method: return a different type without a + // String method + // - select which fields of a complex type should get logged: + // return a simpler struct with fewer fields + // - log unexported fields: return a different struct + // with exported fields + // + // It may return any value of any type. + MarshalLog() interface{} } diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS index 203e84ebab..52ccb5a934 100644 --- a/vendor/github.com/golang/snappy/AUTHORS +++ b/vendor/github.com/golang/snappy/AUTHORS @@ -10,6 +10,7 @@ Amazon.com, Inc Damian Gryski +Eric Buth Google Inc. Jan Mercl <0xjnml@gmail.com> Klaus Post diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS index d9914732b5..ea6524ddd0 100644 --- a/vendor/github.com/golang/snappy/CONTRIBUTORS +++ b/vendor/github.com/golang/snappy/CONTRIBUTORS @@ -26,7 +26,9 @@ # Please keep the list sorted. +Alex Legg Damian Gryski +Eric Buth Jan Mercl <0xjnml@gmail.com> Jonathan Swinney Kai Backman diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go index f1e04b172c..23c6e26c6b 100644 --- a/vendor/github.com/golang/snappy/decode.go +++ b/vendor/github.com/golang/snappy/decode.go @@ -118,32 +118,23 @@ func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { return true } -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - for { - if r.i < r.j { - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil - } +func (r *Reader) fill() error { + for r.i >= r.j { if !r.readFull(r.buf[:4], true) { - return 0, r.err + return r.err } chunkType := r.buf[0] if !r.readHeader { if chunkType != chunkTypeStreamIdentifier { r.err = ErrCorrupt - return 0, r.err + return r.err } r.readHeader = true } chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 if chunkLen > len(r.buf) { r.err = ErrUnsupported - return 0, r.err + return r.err } // The chunk types are specified at @@ -153,11 +144,11 @@ func (r *Reader) Read(p []byte) (int, error) { // Section 4.2. Compressed data (chunk type 0x00). 
if chunkLen < checksumSize { r.err = ErrCorrupt - return 0, r.err + return r.err } buf := r.buf[:chunkLen] if !r.readFull(buf, false) { - return 0, r.err + return r.err } checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 buf = buf[checksumSize:] @@ -165,19 +156,19 @@ func (r *Reader) Read(p []byte) (int, error) { n, err := DecodedLen(buf) if err != nil { r.err = err - return 0, r.err + return r.err } if n > len(r.decoded) { r.err = ErrCorrupt - return 0, r.err + return r.err } if _, err := Decode(r.decoded, buf); err != nil { r.err = err - return 0, r.err + return r.err } if crc(r.decoded[:n]) != checksum { r.err = ErrCorrupt - return 0, r.err + return r.err } r.i, r.j = 0, n continue @@ -186,25 +177,25 @@ func (r *Reader) Read(p []byte) (int, error) { // Section 4.3. Uncompressed data (chunk type 0x01). if chunkLen < checksumSize { r.err = ErrCorrupt - return 0, r.err + return r.err } buf := r.buf[:checksumSize] if !r.readFull(buf, false) { - return 0, r.err + return r.err } checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 // Read directly into r.decoded instead of via r.buf. n := chunkLen - checksumSize if n > len(r.decoded) { r.err = ErrCorrupt - return 0, r.err + return r.err } if !r.readFull(r.decoded[:n], false) { - return 0, r.err + return r.err } if crc(r.decoded[:n]) != checksum { r.err = ErrCorrupt - return 0, r.err + return r.err } r.i, r.j = 0, n continue @@ -213,15 +204,15 @@ func (r *Reader) Read(p []byte) (int, error) { // Section 4.1. Stream identifier (chunk type 0xff). if chunkLen != len(magicBody) { r.err = ErrCorrupt - return 0, r.err + return r.err } if !r.readFull(r.buf[:len(magicBody)], false) { - return 0, r.err + return r.err } for i := 0; i < len(magicBody); i++ { if r.buf[i] != magicBody[i] { r.err = ErrCorrupt - return 0, r.err + return r.err } } continue @@ -230,12 +221,44 @@ func (r *Reader) Read(p []byte) (int, error) { if chunkType <= 0x7f { // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). r.err = ErrUnsupported - return 0, r.err + return r.err } // Section 4.4 Padding (chunk type 0xfe). // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). if !r.readFull(r.buf[:chunkLen], false) { - return 0, r.err + return r.err } } + + return nil +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil +} + +// ReadByte satisfies the io.ByteReader interface. 
+func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + c := r.decoded[r.i] + r.i++ + return c, nil } diff --git a/vendor/github.com/golang/snappy/encode_arm64.s b/vendor/github.com/golang/snappy/encode_arm64.s index bf83667d71..f8d54adfc5 100644 --- a/vendor/github.com/golang/snappy/encode_arm64.s +++ b/vendor/github.com/golang/snappy/encode_arm64.s @@ -382,7 +382,7 @@ inner0: // if load32(src, s) != load32(src, candidate) { continue } break MOVW 0(R7), R3 - MOVW (R6)(R15*1), R4 + MOVW (R6)(R15), R4 CMPW R4, R3 BNE inner0 @@ -672,7 +672,7 @@ inlineEmitCopyEnd: MOVHU R3, 0(R17)(R11<<1) // if uint32(x>>8) == load32(src, candidate) { continue } - MOVW (R6)(R15*1), R4 + MOVW (R6)(R15), R4 CMPW R4, R14 BEQ inner1 diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml index dad29725f8..d324c43ba4 100644 --- a/vendor/github.com/imdario/mergo/.travis.yml +++ b/vendor/github.com/imdario/mergo/.travis.yml @@ -1,4 +1,7 @@ language: go +arch: + - amd64 + - ppc64le install: - go get -t - go get golang.org/x/tools/cmd/cover diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md index 876abb500a..aa8cbd7ce6 100644 --- a/vendor/github.com/imdario/mergo/README.md +++ b/vendor/github.com/imdario/mergo/README.md @@ -97,7 +97,7 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont - [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) - [jnuthong/item_search](https://github.com/jnuthong/item_search) - [bukalapak/snowboard](https://github.com/bukalapak/snowboard) -- [janoszen/containerssh](https://github.com/janoszen/containerssh) +- [containerssh/containerssh](https://github.com/containerssh/containerssh) ## Install diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go index afa84a1e29..8c2a8fcd90 100644 --- a/vendor/github.com/imdario/mergo/merge.go +++ b/vendor/github.com/imdario/mergo/merge.go @@ -95,13 +95,18 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } } } else { - if (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { + if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { dst.Set(src) } } case reflect.Map: if dst.IsNil() && !src.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) + if dst.CanSet() { + dst.Set(reflect.MakeMap(dst.Type())) + } else { + dst = src + return + } } if src.Kind() != reflect.Map { diff --git a/vendor/github.com/klauspost/compress/.gitattributes b/vendor/github.com/klauspost/compress/.gitattributes new file mode 100644 index 0000000000..402433593c --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitattributes @@ -0,0 +1,2 @@ +* -text +*.bin -text -diff diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore new file mode 100644 index 0000000000..b35f8449bf --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +/s2/cmd/_s2sx/sfx-exe diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml 
b/vendor/github.com/klauspost/compress/.goreleaser.yml new file mode 100644 index 0000000000..c9014ce1da --- /dev/null +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -0,0 +1,137 @@ +# This is an example goreleaser.yaml file with some sane defaults. +# Make sure to check the documentation at http://goreleaser.com +before: + hooks: + - ./gen.sh + +builds: + - + id: "s2c" + binary: s2c + main: ./s2/cmd/s2c/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + - + id: "s2d" + binary: s2d + main: ./s2/cmd/s2d/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + - + id: "s2sx" + binary: s2sx + main: ./s2/cmd/_s2sx/main.go + flags: + - -modfile=s2sx.mod + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + +archives: + - + id: s2-binaries + name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}" + replacements: + aix: AIX + darwin: OSX + linux: Linux + windows: Windows + 386: i386 + amd64: x86_64 + freebsd: FreeBSD + netbsd: NetBSD + format_overrides: + - goos: windows + format: zip + files: + - unpack/* + - s2/LICENSE + - s2/README.md +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ .Tag }}-next" +changelog: + sort: asc + filters: + exclude: + - '^doc:' + - '^docs:' + - '^test:' + - '^tests:' + - '^Update\sREADME.md' + +nfpms: + - + file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}" + vendor: Klaus Post + homepage: https://github.com/klauspost/compress + maintainer: Klaus Post + description: S2 Compression Tool + license: BSD 3-Clause + formats: + - deb + - rpm + replacements: + darwin: Darwin + linux: Linux + freebsd: FreeBSD + amd64: x86_64 diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE index 1eb75ef68e..87d5574777 100644 --- a/vendor/github.com/klauspost/compress/LICENSE +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -26,3 +26,279 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ + +Files: gzhttp/* + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2017 The New York Times Company + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +------------------ + +Files: s2/cmd/internal/readahead/* + +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------- +Files: snappy/* +Files: internal/snapref/* + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +----------------- + +Files: s2/cmd/internal/filepathx/* + +Copyright 2016 The filepathx Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md new file mode 100644 index 0000000000..3429879eb6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/README.md @@ -0,0 +1,438 @@ +# compress + +This package provides various compression algorithms. + +* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go. +* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy. +* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). +* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams. +* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. +* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. +* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. +* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here. + +[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) +[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) +[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) + +# changelog + +* Aug 30, 2021 (v1.13.5) + * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) + * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) + * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) + * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) + +* Aug 12, 2021 (v1.13.4) + * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). + * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) + +* Aug 3, 2021 (v1.13.3) + * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) + * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) + * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. 
[#406](https://github.com/klauspost/compress/pull/406) + * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) + * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) + * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) + +* Jun 14, 2021 (v1.13.1) + * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) + * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) + * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) + * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) + +* Jun 3, 2021 (v1.13.0) + * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. + * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) + * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) + +* May 25, 2021 (v1.12.3) + * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) + * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375) + * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373) + +* Apr 27, 2021 (v1.12.2) + * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365) + * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363) + * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367) + * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/358) + * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362) + * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368) + +* Apr 14, 2021 (v1.12.1) + * snappy package removed. Upstream added as dependency. + * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353) + * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352) + * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348) + * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) + * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) + * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) + +
+ See changes prior to v1.12.1 + +* Mar 26, 2021 (v1.11.13) + * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) + * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336) + * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338) + * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341) + +* Mar 5, 2021 (v1.11.12) + * s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives). + * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328) + +* Mar 1, 2021 (v1.11.9) + * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324) + * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325) + * s2: Fix binaries. + +* Feb 25, 2021 (v1.11.8) + * s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended. + * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) + * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) + * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) + * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313) + +* Jan 14, 2021 (v1.11.7) + * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309) + * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310) + * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311) + * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308) + * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312) + +* Jan 7, 2021 (v1.11.6) + * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306) + * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305) + +* Dec 20, 2020 (v1.11.4) + * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304) + * Add header decoder [#299](https://github.com/klauspost/compress/pull/299) + * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297) + * Simplify/speed up small blocks with known max size. 
[#300](https://github.com/klauspost/compress/pull/300) + * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) + +* Nov 15, 2020 (v1.11.3) + * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) + * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) + +* Oct 11, 2020 (v1.11.2) + * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) + +* Oct 1, 2020 (v1.11.1) + * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) + +* Sept 8, 2020 (v1.11.0) + * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) + * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) + * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) +
+ +
+ See changes prior to v1.11.0 + +* July 8, 2020 (v1.10.11) + * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) + * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) + +* June 23, 2020 (v1.10.10) + * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) + +* June 16, 2020 (v1.10.9): + * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) + * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) + * Fuzzit tests removed. The service has been purchased and is no longer available. + +* June 5, 2020 (v1.10.8): + * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) + +* June 1, 2020 (v1.10.7): + * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) + * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) + * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) + +* May 21, 2020: (v1.10.6) + * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) + * zstd: Stricter decompression checks. + +* April 12, 2020: (v1.10.5) + * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) + +* Apr 8, 2020: (v1.10.4) + * zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) +* Mar 11, 2020: (v1.10.3) + * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) + * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) + * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) + * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) + * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) + +* Feb 27, 2020: (v1.10.2) + * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) + * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) + +* Feb 18, 2020: (v1.10.1) + * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) + * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) + * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) + +* Feb 4, 2020: (v1.10.0) + * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. 
[#216](https://github.com/klauspost/compress/pull/216) + * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) + * Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214) + * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) + +
+ +
+ See changes prior to v1.10.0 + +* Jan 20,2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206). +* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204) +* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed. +* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases. +* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192) +* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder. +* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199) +* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features +* Dec 29, 2019: zstd: Single segment flag condintions tweaked. [#197](https://github.com/klauspost/compress/pull/197) +* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198) +* Dec 10, 2019: s2: Fix repeat length output when just above at 16MB limit. +* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191) +* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188) +* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187) +* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines. +* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate. +* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184) +* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate. +* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180) +* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB. +* Nov 11, 2019: Reduce inflate memory use by 1KB. +* Nov 10, 2019: Less allocations in deflate bit writer. +* Nov 10, 2019: Fix inconsistent error returned by zstd decoder. +* Oct 28, 2019 (v1.9.1) ztsd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174) +* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173) +* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172) +* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105) + +
+ +
+ See changes prior to v1.9.0 + +* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) +* Oct 3, 2019: Fix inconsistent results on broken zstd streams. +* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) +* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). +* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). +* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). +* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. +* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. +* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. +* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. +* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. +* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. +* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. +* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) +* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) +* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) +* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) +* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. +* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. +* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. +* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. +* June 17, 2019: zstd decompression bugfix. +* June 17, 2019: fix 32 bit builds. +* June 17, 2019: Easier use in modules (less dependencies). +* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. +* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression. +* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. +* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! +* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. +* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. +* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). 
+* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below. +* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0). +* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change. +* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change. +* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function. +* May 28, 2017: Reduce allocations when resetting decoder. +* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7. +* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625). +* Oct 25, 2016: Level 2-4 have been rewritten and now offers significantly better performance than before. +* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update. +* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level. +* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression. +* Mar 24, 2016: Small speedup for level 1-3. +* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. +* Feb 19, 2016: Handle small payloads faster in level 1-3. +* Feb 19, 2016: Added faster level 2 + 3 compression modes. +* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5. +* Feb 14, 2016: Snappy: Merge upstream changes. +* Feb 14, 2016: Snappy: Fix aggressive skipping. +* Feb 14, 2016: Snappy: Update benchmark. +* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression. +* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy to compress material run faster. Typical speedup is around 25%. +* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content. +* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup. +* Jan 16, 2016: Optimization on deflate level 1,2,3 compression. +* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives. +* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs. +* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms. +* Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update! +* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet). +* Nov 20 2015: Small optimization to bit writer on 64 bit systems. +* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15). +* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate. 
+* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file +* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. + +
+ +# deflate usage + +* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/). +* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/). +* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). +* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) + +The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: + +| old import | new import | Documentation +|--------------------|-----------------------------------------|--------------------| +| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) +| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) +| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) +| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) + +* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). + +You may also be interested in [pgzip](https://github.com/klauspost/pgzip), which is a drop in replacement for gzip, which support multithreaded compression on big files and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages. + +The packages contains the same as the standard library, so you can use the godoc for that: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/). + +Currently there is only minor speedup on decompression (mostly CRC32 calculation). + +Memory usage is typically 1MB for a Writer. stdlib is in the same range. +If you expect to have a lot of concurrently allocated Writers consider using +the stateless compress described below. + +# Stateless compression + +This package offers stateless compression as a special option for gzip/deflate. +It will do compression but without maintaining any state between Write calls. + +This means there will be no memory kept between Write calls, but compression and speed will be suboptimal. + +This is only relevant in cases where you expect to run many thousands of compressors concurrently, +but with very little activity. This is *not* intended for regular web servers serving individual requests. + +Because of this, the size of actual Write calls will affect output size. + +In gzip, specify level `-3` / `gzip.StatelessCompression` to enable. + +For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter) + +A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer: + +``` + // replace 'ioutil.Discard' with your output. 
+ gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression) + if err != nil { + return err + } + defer gzw.Close() + + w := bufio.NewWriterSize(gzw, 4096) + defer w.Flush() + + // Write to 'w' +``` + +This will only use up to 4KB in memory when the writer is idle. + +Compression is almost always worse than the fastest compression level +and each write will allocate (a little) memory. + +# Performance Update 2018 + +It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD. + +The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates i could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet. + +The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller is the compressed output compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input. + +The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only uses one CPU core. [`pgzip`](https://github.com/klauspost/pgzip), [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) uses all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet). + + +## Overall differences. + +There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels. + +The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted by library to give a smoother transition between the compression levels than the standard library. + +This package attempts to provide a more smooth transition, where "1" is taking a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between gives something reasonable in between. The standard library has big differences in levels 1-4, but levels 5-9 having no significant gains - often spending a lot more time than can be justified by the achieved compression. + +There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab. + +## Web Content + +This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS. + +Since level 1 and 9 are close to being the same code, they are quite close. But looking at the levels in-between the differences are quite big. + +Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. 
You can draw your own conclusions on what would be the most expensive for your case. + +## Object files + +This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible. + +The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but is sacrificing quite a bit of compression. + +The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively. + +## Highly Compressible File + +This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc. + +It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5 and levels 7 and 8 offering great speed for the achieved compression. + +So if you know you content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground". + +## Medium-High Compressible + +This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams. + +We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both. + +## Medium Compressible + +I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario. + +The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior. + + +## Un-compressible Content + +This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections. + + +## Huffman only compression + +This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reduce the number of bits to represent each character. + +This means that often used characters, like 'e' and ' ' (space) in text use the fewest bits to represent, and rare characters like '¤' takes more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM). + +Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core. 
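As a concrete illustration of the Huffman-only mode described above, the sketch below assumes the package's flate sub-package and its HuffmanOnly level (mirroring the standard library constant); the sample data and repeat count are made up:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/compress/flate"
)

func main() {
	data := bytes.Repeat([]byte("hello huffman "), 1000)

	var buf bytes.Buffer
	// HuffmanOnly disables match searching entirely; only the per-byte
	// Huffman coding step runs, which is what gives the near-linear speed
	// described above.
	w, err := flate.NewWriter(&buf, flate.HuffmanOnly)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write(data); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	fmt.Printf("compressed %d bytes down to %d bytes\n", len(data), buf.Len())
}
```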
+
+The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
+
+The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup.
+
+For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
+
+This is implemented in Go 1.7 as "Huffman Only" mode, though not exposed for gzip.
+
+
+# license
+
+This code is licensed under the same conditions as the original Go code. See LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go
new file mode 100644
index 0000000000..ea5a692d51
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/compressible.go
@@ -0,0 +1,85 @@
+package compress
+
+import "math"
+
+// Estimate returns a normalized compressibility estimate of block b.
+// Values close to zero are likely uncompressible.
+// Values above 0.1 are likely to be compressible.
+// Values above 0.5 are very compressible.
+// Very small lengths will return 0.
+func Estimate(b []byte) float64 {
+	if len(b) < 16 {
+		return 0
+	}
+
+	// Correctly predicted order 1
+	hits := 0
+	lastMatch := false
+	var o1 [256]byte
+	var hist [256]int
+	c1 := byte(0)
+	for _, c := range b {
+		if c == o1[c1] {
+			// We only count a hit if there was two correct predictions in a row.
+			if lastMatch {
+				hits++
+			}
+			lastMatch = true
+		} else {
+			lastMatch = false
+		}
+		o1[c1] = c
+		c1 = c
+		hist[c]++
+	}
+
+	// Use x^0.6 to give better spread
+	prediction := math.Pow(float64(hits)/float64(len(b)), 0.6)
+
+	// Calculate histogram distribution
+	variance := float64(0)
+	avg := float64(len(b)) / 256
+
+	for _, v := range hist {
+		Δ := float64(v) - avg
+		variance += Δ * Δ
+	}
+
+	stddev := math.Sqrt(float64(variance)) / float64(len(b))
+	exp := math.Sqrt(1 / float64(len(b)))
+
+	// Subtract expected stddev
+	stddev -= exp
+	if stddev < 0 {
+		stddev = 0
+	}
+	stddev *= 1 + exp
+
+	// Use x^0.4 to give better spread
+	entropy := math.Pow(stddev, 0.4)
+
+	// 50/50 weight between prediction and histogram distribution
+	return math.Pow((prediction+entropy)/2, 0.9)
+}
+
+// ShannonEntropyBits returns the number of bits minimum required to represent
+// an entropy encoding of the input bytes.
+// https://en.wiktionary.org/wiki/Shannon_entropy
+func ShannonEntropyBits(b []byte) int {
+	if len(b) == 0 {
+		return 0
+	}
+	var hist [256]int
+	for _, c := range b {
+		hist[c]++
+	}
+	shannon := float64(0)
+	invTotal := 1.0 / float64(len(b))
+	for _, v := range hist[:] {
+		if v > 0 {
+			n := float64(v)
+			shannon += math.Ceil(-math.Log2(n*invTotal) * n)
+		}
+	}
+	return int(math.Ceil(shannon))
+}
diff --git a/vendor/github.com/klauspost/compress/gen.sh b/vendor/github.com/klauspost/compress/gen.sh
new file mode 100644
index 0000000000..aff942205f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/gen.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+cd s2/cmd/_s2sx/ || exit 1
+go generate .
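The `compressible.go` file added above exports two helpers, `Estimate` and `ShannonEntropyBits`, whose signatures appear in the hunk. A minimal usage sketch (illustrative only, assuming the vendored module path `github.com/klauspost/compress` and the doc-comment thresholds quoted above):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress"
)

func main() {
	// Highly redundant input; per the doc comment, Estimate values above 0.5
	// indicate very compressible data.
	redundant := bytes.Repeat([]byte("abcabcabc"), 1000)
	fmt.Printf("estimate: %.3f\n", compress.Estimate(redundant))

	// ShannonEntropyBits gives a lower bound on the entropy-coded size in bits.
	fmt.Printf("entropy bits: %d (input is %d bits)\n",
		compress.ShannonEntropyBits(redundant), len(redundant)*8)
}
```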
diff --git a/vendor/github.com/klauspost/compress/go.mod b/vendor/github.com/klauspost/compress/go.mod new file mode 100644 index 0000000000..5aa64a436a --- /dev/null +++ b/vendor/github.com/klauspost/compress/go.mod @@ -0,0 +1,3 @@ +module github.com/klauspost/compress + +go 1.15 diff --git a/vendor/github.com/klauspost/compress/go.sum b/vendor/github.com/klauspost/compress/go.sum new file mode 100644 index 0000000000..e69de29bb2 diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index 0823c928ce..8323dc0538 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -161,6 +161,70 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error) return s.Out, false, nil } +// EstimateSizes will estimate the data sizes +func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) { + s, err = s.prepare(in) + if err != nil { + return 0, 0, 0, err + } + + // Create histogram, if none was provided. + tableSz, dataSz, reuseSz = -1, -1, -1 + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return 0, 0, 0, ErrIncompressible + } + // One symbol, use RLE + return 0, 0, 0, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return 0, 0, 0, ErrIncompressible + } + + // Calculate new table. 
+ err = s.buildCTable() + if err != nil { + return 0, 0, 0, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + tableSz, err = s.cTable.estTableSize(s) + if err != nil { + return 0, 0, 0, err + } + if canReuse { + reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen]) + } + dataSz = s.cTable.estimateSize(s.count[:s.symbolLen]) + + // Restore + return tableSz, dataSz, reuseSz, nil +} + func (s *Scratch) compress1X(src []byte) ([]byte, error) { return s.compress1xDo(s.Out, src) } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 41703bba4d..9b7cc8e97b 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -344,35 +344,241 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { var buf [256]byte var off uint8 - shift := (8 - d.actualTableLog) & 7 - - //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) - for br.off >= 4 { - br.fillFast() - v := dt[br.peekByteFast()>>shift] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[br.peekByteFast()>>shift] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[br.peekByteFast()>>shift] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[br.peekByteFast()>>shift] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded + switch d.actualTableLog { + case 8: + const shift = 8 - 8 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 7: + const shift = 8 - 7 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 6: + const shift = 8 - 6 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 5: + const shift = 8 - 5 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 4: + const shift = 8 - 4 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 3: + const shift = 8 - 3 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 2: + const shift = 8 - 2 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 1: + const shift = 8 - 1 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) } - dst = append(dst, buf[:]...) } + default: + return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) } if len(dst)+int(off) > maxDecodedSize { @@ -383,6 +589,8 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { // br < 4, so uint8 is fine bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + shift := (8 - d.actualTableLog) & 7 + for bitsLeft > 0 { if br.bitsRead >= 64-8 { for br.off > 0 { @@ -423,24 +631,24 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { var buf [256]byte var off uint8 - const shift = 0 + const shift = 56 //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) for br.off >= 4 { br.fillFast() - v := dt[br.peekByteFast()>>shift] + v := dt[uint8(br.value>>shift)] br.advance(uint8(v.entry)) buf[off+0] = uint8(v.entry >> 8) - v = dt[br.peekByteFast()>>shift] + v = dt[uint8(br.value>>shift)] br.advance(uint8(v.entry)) buf[off+1] = uint8(v.entry >> 8) - v = dt[br.peekByteFast()>>shift] + v = dt[uint8(br.value>>shift)] br.advance(uint8(v.entry)) buf[off+2] = uint8(v.entry >> 8) - v = dt[br.peekByteFast()>>shift] + v = dt[uint8(br.value>>shift)] br.advance(uint8(v.entry)) buf[off+3] = uint8(v.entry >> 8) @@ -474,7 +682,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { br.close() return nil, ErrMaxDecodedSizeExceeded } - v := dt[br.peekByteFast()>>shift] + v := dt[br.peekByteFast()] nBits := uint8(v.entry) br.advance(nBits) bitsLeft -= int8(nBits) @@ -709,7 +917,6 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { shift := (8 - d.actualTableLog) & 7 const tlSize = 1 << 8 - const tlMask = tlSize - 1 single := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go index 7ec2022b65..3ee00ecb47 100644 --- a/vendor/github.com/klauspost/compress/huff0/huff0.go +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -245,6 +245,68 @@ func (c cTable) write(s *Scratch) error { return nil } +func (c cTable) estTableSize(s *Scratch) (sz int, err error) { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. 
+ if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + sz += 1 + len(b) + return sz, nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return 0, ErrIncompressible + } + // special case, pack weights 4 bits/weight. + sz += 1 + int(maxSymbolValue/2) + return sz, nil +} + // estimateSize returns the estimated size in bytes of the input represented in the // histogram supplied. func (c cTable) estimateSize(hist []uint32) int { diff --git a/vendor/github.com/klauspost/compress/internal/snapref/LICENSE b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE new file mode 100644 index 0000000000..6050c10f4c --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go new file mode 100644 index 0000000000..40796a49d6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode.go @@ -0,0 +1,264 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. 
+ ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Decode handles the Snappy block format, not the Snappy stream format. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +func (r *Reader) fill() error { + for r.i >= r.j { + if !r.readFull(r.buf[:4], true) { + return r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). 
+ if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.decoded[:n], false) { + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return r.err + } + } + + return nil +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil +} + +// ReadByte satisfies the io.ByteReader interface. +func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + c := r.decoded[r.i] + r.i++ + return c, nil +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go new file mode 100644 index 0000000000..77395a6b8b --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go @@ -0,0 +1,113 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset >= length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go new file mode 100644 index 0000000000..13c6040a5d --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode.go @@ -0,0 +1,289 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. 
+func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. 
+ // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. 
+ n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go new file mode 100644 index 0000000000..511bba65db --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -0,0 +1,236 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. 
+// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The + // threshold for this loop is a little higher (at 68 = 64 + 4), and the + // length emitted down below is is a little lower (at 60 = 64 - 4), because + // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed + // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as + // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as + // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +// extendMatch returns the largest k such that k <= len(src) and that +// src[i:i+k-j] and src[j:k] have the same contents. +// +// It assumes that: +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. 
However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. 
+ x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go new file mode 100644 index 0000000000..34d01f4aa6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snapref implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snapref + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. 
It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod new file mode 100644 index 0000000000..2263853fca --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2sx.mod @@ -0,0 +1,4 @@ +module github.com/klauspost/compress + +go 1.16 + diff --git a/vendor/github.com/klauspost/compress/s2sx.sum b/vendor/github.com/klauspost/compress/s2sx.sum new file mode 100644 index 0000000000..e69de29bb2 diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index 787813fa9e..c8f0f16fc1 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -152,7 +152,7 @@ file out level insize outsize millis mb/s silesia.tar zskp 1 211947520 73101992 643 313.87 silesia.tar zskp 2 211947520 67504318 969 208.38 silesia.tar zskp 3 211947520 64595893 2007 100.68 -silesia.tar zskp 4 211947520 60995370 7691 26.28 +silesia.tar zskp 4 211947520 60995370 8825 22.90 cgo zstd: silesia.tar zstd 1 211947520 73605392 543 371.56 @@ -162,7 +162,7 @@ silesia.tar zstd 9 211947520 60212393 5063 39.92 gzip, stdlib/this package: silesia.tar gzstd 1 211947520 80007735 1654 122.21 -silesia.tar gzkp 1 211947520 80369488 1168 173.06 +silesia.tar gzkp 1 211947520 80136201 1152 175.45 GOB stream of binary data. Highly compressible. https://files.klauspost.com/compress/gob-stream.7z @@ -171,13 +171,15 @@ file out level insize outsize millis mb/s gob-stream zskp 1 1911399616 235022249 3088 590.30 gob-stream zskp 2 1911399616 205669791 3786 481.34 gob-stream zskp 3 1911399616 175034659 9636 189.17 -gob-stream zskp 4 1911399616 167273881 29337 62.13 +gob-stream zskp 4 1911399616 165609838 50369 36.19 + gob-stream zstd 1 1911399616 249810424 2637 691.26 gob-stream zstd 3 1911399616 208192146 3490 522.31 gob-stream zstd 6 1911399616 193632038 6687 272.56 gob-stream zstd 9 1911399616 177620386 16175 112.70 + gob-stream gzstd 1 1911399616 357382641 10251 177.82 -gob-stream gzkp 1 1911399616 362156523 5695 320.08 +gob-stream gzkp 1 1911399616 359753026 5438 335.20 The test data for the Large Text Compression Benchmark is the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. 
@@ -187,11 +189,13 @@ file out level insize outsize millis mb/s enwik9 zskp 1 1000000000 343848582 3609 264.18 enwik9 zskp 2 1000000000 317276632 5746 165.97 enwik9 zskp 3 1000000000 292243069 12162 78.41 -enwik9 zskp 4 1000000000 275241169 36430 26.18 +enwik9 zskp 4 1000000000 262183768 82837 11.51 + enwik9 zstd 1 1000000000 358072021 3110 306.65 enwik9 zstd 3 1000000000 313734672 4784 199.35 enwik9 zstd 6 1000000000 295138875 10290 92.68 enwik9 zstd 9 1000000000 278348700 28549 33.40 + enwik9 gzstd 1 1000000000 382578136 9604 99.30 enwik9 gzkp 1 1000000000 383825945 6544 145.73 @@ -202,13 +206,15 @@ file out level insize outsize millis mb/s github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40 github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96 github-june-2days-2019.json zskp 3 6273951764 524340691 34043 175.75 -github-june-2days-2019.json zskp 4 6273951764 503314661 93811 63.78 +github-june-2days-2019.json zskp 4 6273951764 470320075 170190 35.16 + github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 + github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79 -github-june-2days-2019.json gzkp 1 6273951764 1128755542 19236 311.03 +github-june-2days-2019.json gzkp 1 6273951764 1125417694 21788 274.61 VM Image, Linux mint with a few installed applications: https://files.klauspost.com/compress/rawstudio-mint14.7z @@ -217,13 +223,15 @@ file out level insize outsize millis mb/s rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84 rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07 rawstudio-mint14.tar zskp 3 8558382592 3158085214 77675 105.08 -rawstudio-mint14.tar zskp 4 8558382592 3020370044 404956 20.16 +rawstudio-mint14.tar zskp 4 8558382592 2965110639 857750 9.52 + rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 + rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40 -rawstudio-mint14.tar gzkp 1 8558382592 3970463184 41749 195.49 +rawstudio-mint14.tar gzkp 1 8558382592 3962605659 45113 180.92 CSV data: https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst @@ -232,13 +240,15 @@ file out level insize outsize millis mb/s nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35 nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44 nyc-taxi-data-10M.csv zskp 3 3325605752 530289687 25239 125.66 -nyc-taxi-data-10M.csv zskp 4 3325605752 490907191 65939 48.10 +nyc-taxi-data-10M.csv zskp 4 3325605752 476268884 135958 23.33 + nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12 + nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83 -nyc-taxi-data-10M.csv gzkp 1 3325605752 924718719 16388 193.53 +nyc-taxi-data-10M.csv gzkp 1 3325605752 922257165 16780 189.00 ``` ## Decompressor diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index e30af505ca..8a98c4562e 100644 --- 
a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -168,10 +168,10 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { // Read block data. if cap(b.dataStorage) < cSize { - if b.lowMem { + if b.lowMem || cSize > maxCompressedBlockSize { b.dataStorage = make([]byte, 0, cSize) } else { - b.dataStorage = make([]byte, 0, maxBlockSize) + b.dataStorage = make([]byte, 0, maxCompressedBlockSize) } } if cap(b.dst) <= maxSize { diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index 4d984c3b26..f430f58b57 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -260,9 +260,10 @@ func (d *Decoder) WriteTo(w io.Writer) (int64, error) { if len(d.current.b) > 0 { n2, err2 := w.Write(d.current.b) n += int64(n2) - if err2 != nil && d.current.err == nil { + if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) { d.current.err = err2 - break + } else if n2 != len(d.current.b) { + d.current.err = io.ErrShortWrite } } if d.current.err != nil { diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go index c0fd058c28..95cc9b8b81 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -17,14 +17,16 @@ type decoderOptions struct { lowMem bool concurrent int maxDecodedSize uint64 + maxWindowSize uint64 dicts []dict } func (o *decoderOptions) setDefault() { *o = decoderOptions{ // use less ram: true for now, but may change. - lowMem: true, - concurrent: runtime.GOMAXPROCS(0), + lowMem: true, + concurrent: runtime.GOMAXPROCS(0), + maxWindowSize: MaxWindowSize, } o.maxDecodedSize = 1 << 63 } @@ -52,7 +54,6 @@ func WithDecoderConcurrency(n int) DOption { // WithDecoderMaxMemory allows to set a maximum decoded size for in-memory // non-streaming operations or maximum window size for streaming operations. // This can be used to control memory usage of potentially hostile content. -// For streaming operations, the maximum window size is capped at 1<<30 bytes. // Maximum and default is 1 << 63 bytes. func WithDecoderMaxMemory(n uint64) DOption { return func(o *decoderOptions) error { @@ -81,3 +82,21 @@ func WithDecoderDicts(dicts ...[]byte) DOption { return nil } } + +// WithDecoderMaxWindow allows to set a maximum window size for decodes. +// This allows rejecting packets that will cause big memory usage. +// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. +// If WithDecoderMaxMemory is set to a lower value, that will be used. +// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec. 
+func WithDecoderMaxWindow(size uint64) DOption { + return func(o *decoderOptions) error { + if size < MinWindowSize { + return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes") + } + if size > (1<<41)+7*(1<<38) { + return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB") + } + o.maxWindowSize = size + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index 60f2986486..295cd602a4 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -38,8 +38,8 @@ func (e *fastBase) AppendCRC(dst []byte) []byte { // WindowSize returns the window size of the encoder, // or a window size small enough to contain the input size, if > 0. -func (e *fastBase) WindowSize(size int) int32 { - if size > 0 && size < int(e.maxMatchOff) { +func (e *fastBase) WindowSize(size int64) int32 { + if size > 0 && size < int64(e.maxMatchOff) { b := int32(1) << uint(bits.Len(uint(size))) // Keep minimum window. if b < 1024 { diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index b7d4b90047..96028ecd83 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -5,22 +5,61 @@ package zstd import ( + "bytes" "fmt" - "math/bits" + + "github.com/klauspost/compress" ) const ( - bestLongTableBits = 20 // Bits used in the long match table + bestLongTableBits = 22 // Bits used in the long match table bestLongTableSize = 1 << bestLongTableBits // Size of the table + bestLongLen = 8 // Bytes used for table hash // Note: Increasing the short table bits or making the hash shorter // can actually lead to compression degradation since it will 'steal' more from the // long match table and match offsets are quite big. // This greatly depends on the type of input. - bestShortTableBits = 16 // Bits used in the short match table + bestShortTableBits = 18 // Bits used in the short match table bestShortTableSize = 1 << bestShortTableBits // Size of the table + bestShortLen = 4 // Bytes used for table hash + ) +type match struct { + offset int32 + s int32 + length int32 + rep int32 + est int32 +} + +const highScore = 25000 + +// estBits will estimate output bits from predefined tables. +func (m *match) estBits(bitsPerByte int32) { + mlc := mlCode(uint32(m.length - zstdMinMatch)) + var ofc uint8 + if m.rep < 0 { + ofc = ofCode(uint32(m.s-m.offset) + 3) + } else { + ofc = ofCode(uint32(m.rep)) + } + // Cost, excluding + ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] + + // Add cost of match encoding... + m.est = int32(ofTT.outBits + mlTT.outBits) + m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16) + // Subtract savings compared to literal encoding... + m.est -= (m.length * bitsPerByte) >> 10 + if m.est > 0 { + // Unlikely gain.. + m.length = 0 + m.est = highScore + } +} + // bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. // The long match table contains the previous entry with the same hash, // effectively making it a "chain" of length 2. @@ -109,6 +148,14 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { return } + // Use this to estimate literal cost. + // Scaled by 10 bits. 
+ bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src)) + // Huffman can never go < 1 bit/byte + if bitsPerByte < 1024 { + bitsPerByte = 1024 + } + // Override src src = e.hist sLimit := int32(len(src)) - inputMargin @@ -145,51 +192,49 @@ encodeLoop: panic("offset0 was 0") } - type match struct { - offset int32 - s int32 - length int32 - rep int32 - } - matchAt := func(offset int32, s int32, first uint32, rep int32) match { - if s-offset >= e.maxMatchOff || load3232(src, offset) != first { - return match{offset: offset, s: s} - } - return match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep} - } - bestOf := func(a, b match) match { - aScore := b.s - a.s + a.length - bScore := a.s - b.s + b.length - if a.rep < 0 { - aScore = aScore - int32(bits.Len32(uint32(a.offset)))/8 - } - if b.rep < 0 { - bScore = bScore - int32(bits.Len32(uint32(b.offset)))/8 - } - if aScore >= bScore { + if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 { return a } return b } const goodEnough = 100 - nextHashL := hash8(cv, bestLongTableBits) - nextHashS := hash4x64(cv, bestShortTableBits) + nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) + nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] + matchAt := func(offset int32, s int32, first uint32, rep int32) match { + if s-offset >= e.maxMatchOff || load3232(src, offset) != first { + return match{s: s, est: highScore} + } + if debugAsserts { + if !bytes.Equal(src[s:s+4], src[offset:offset+4]) { + panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) + } + } + m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep} + m.estBits(bitsPerByte) + return m + } + best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)) + if canRepeat && best.length < goodEnough { - best = bestOf(best, matchAt(s-offset1+1, s+1, uint32(cv>>8), 1)) - best = bestOf(best, matchAt(s-offset2+1, s+1, uint32(cv>>8), 2)) - best = bestOf(best, matchAt(s-offset3+1, s+1, uint32(cv>>8), 3)) + cv32 := uint32(cv >> 8) + spp := s + 1 + best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) + best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2)) + best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) if best.length > 0 { - best = bestOf(best, matchAt(s-offset1+3, s+3, uint32(cv>>24), 1)) - best = bestOf(best, matchAt(s-offset2+3, s+3, uint32(cv>>24), 2)) - best = bestOf(best, matchAt(s-offset3+3, s+3, uint32(cv>>24), 3)) + cv32 = uint32(cv >> 24) + spp += 2 + best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) + best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2)) + best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) } } // Load next and check... 
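The enc_best.go hunks above replace the old length-based heuristic with a cost model: each candidate match gets an estimated encoding cost from the predefined FSE tables (estBits), and bestOf keeps the candidate whose cost, plus the Shannon-entropy-priced literals it would skip, is lowest. A standalone sketch of that comparison, not part of the patch, with made-up numbers:

```go
package main

import "fmt"

// match mirrors only the fields the comparison needs: s is the start
// position in the source and est the estimated encoding cost in bits.
type match struct {
	s, length, est int32
}

// bestOf prefers the candidate whose estimated cost, plus the cost of
// emitting the bytes between the two start positions as literals, is
// lowest. bitsPerByte is the Shannon-entropy literal cost scaled by 1024,
// matching the ">>10" fixed-point scaling used in the patch.
func bestOf(a, b match, bitsPerByte int32) match {
	if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 {
		return a
	}
	return b
}

func main() {
	bitsPerByte := int32(4 << 10) // assume literals cost ~4 bits each
	a := match{s: 100, length: 8, est: 30}
	b := match{s: 103, length: 12, est: 28}
	best := bestOf(a, b, bitsPerByte)
	fmt.Printf("picked match at s=%d (len=%d)\n", best.s, best.length)
}
```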
@@ -209,22 +254,28 @@ encodeLoop: } s++ - candidateS = e.table[hash4x64(cv>>8, bestShortTableBits)] + candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] cv = load6432(src, s) cv2 := load6432(src, s+1) - candidateL = e.longTable[hash8(cv, bestLongTableBits)] - candidateL2 := e.longTable[hash8(cv2, bestLongTableBits)] + candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] + candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] + // Short at s+1 best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) + // Long at s+1, s+2 best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)) best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)) best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)) - + if false { + // Short at s+3. + // Too often worse... + best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)) + } // See if we can find a better match by checking where the current best ends. // Use that offset to see if we can find a better full match. if sAt := best.s + best.length; sAt < sLimit { - nextHashL := hash8(load6432(src, sAt), bestLongTableBits) + nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) candidateEnd := e.longTable[nextHashL] if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 { bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1)) @@ -236,6 +287,12 @@ encodeLoop: } } + if debugAsserts { + if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { + panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) + } + } + // We have a match, we can store the forward value if best.rep > 0 { s = best.s @@ -284,8 +341,8 @@ encodeLoop: off := index0 + e.cur for index0 < s-1 { cv0 := load6432(src, index0) - h0 := hash8(cv0, bestLongTableBits) - h1 := hash4x64(cv0, bestShortTableBits) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} off++ @@ -311,7 +368,7 @@ encodeLoop: panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) } - if debugAsserts && canRepeat && int(offset1) > len(src) { + if debugAsserts && int(offset1) > len(src) { panic("invalid offset") } @@ -352,8 +409,8 @@ encodeLoop: // every entry for index0 < s-1 { cv0 := load6432(src, index0) - h0 := hash8(cv0, bestLongTableBits) - h1 := hash4x64(cv0, bestShortTableBits) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} @@ -374,8 +431,8 @@ encodeLoop: } // Store this, since we have it. - nextHashS := hash4x64(cv, bestShortTableBits) - nextHashL := hash8(cv, bestLongTableBits) + nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) + nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) // We have at least 4 byte match. // No need to check backwards. 
We come straight from a match @@ -425,7 +482,7 @@ func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { e.Encode(blk, src) } -// ResetDict will reset and set a dictionary if not nil +// Reset will reset and set a dictionary if not nil func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { e.resetBase(d, singleBlock) if d == nil { @@ -441,10 +498,10 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { const hashLog = bestShortTableBits cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hash4x64(cv, hashLog) // 0 -> 4 - nextHash1 := hash4x64(cv>>8, hashLog) // 1 -> 5 - nextHash2 := hash4x64(cv>>16, hashLog) // 2 -> 6 - nextHash3 := hash4x64(cv>>24, hashLog) // 3 -> 7 + nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7 e.dictTable[nextHash] = prevEntry{ prev: e.dictTable[nextHash].offset, offset: i, @@ -472,7 +529,7 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { } if len(d.content) >= 8 { cv := load6432(d.content, 0) - h := hash8(cv, bestLongTableBits) + h := hashLen(cv, bestLongTableBits, bestLongLen) e.dictLongTable[h] = prevEntry{ offset: e.maxMatchOff, prev: e.dictLongTable[h].offset, @@ -482,7 +539,7 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { off := 8 // First to read for i := e.maxMatchOff + 1; i < end; i++ { cv = cv>>8 | (uint64(d.content[off]) << 56) - h := hash8(cv, bestLongTableBits) + h := hashLen(cv, bestLongTableBits, bestLongLen) e.dictLongTable[h] = prevEntry{ offset: i, prev: e.dictLongTable[h].offset, diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index eab7b5083e..602c05ee0c 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -9,6 +9,7 @@ import "fmt" const ( betterLongTableBits = 19 // Bits used in the long match table betterLongTableSize = 1 << betterLongTableBits // Size of the table + betterLongLen = 8 // Bytes used for table hash // Note: Increasing the short table bits or making the hash shorter // can actually lead to compression degradation since it will 'steal' more from the @@ -16,6 +17,7 @@ const ( // This greatly depends on the type of input. 
betterShortTableBits = 13 // Bits used in the short match table betterShortTableSize = 1 << betterShortTableBits // Size of the table + betterShortLen = 5 // Bytes used for table hash betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard @@ -154,8 +156,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS := hash5(cv, betterShortTableBits) - nextHashL := hash8(cv, betterLongTableBits) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -214,10 +216,10 @@ encodeLoop: for index0 < s-1 { cv0 := load6432(src, index0) cv1 := cv0 >> 8 - h0 := hash8(cv0, betterLongTableBits) + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} index0 += 2 } cv = load6432(src, s) @@ -275,10 +277,10 @@ encodeLoop: for index0 < s-1 { cv0 := load6432(src, index0) cv1 := cv0 >> 8 - h0 := hash8(cv0, betterLongTableBits) + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} index0 += 2 } cv = load6432(src, s) @@ -353,7 +355,7 @@ encodeLoop: // See if we can find a long match at s+1 const checkAt = 1 cv := load6432(src, s+checkAt) - nextHashL = hash8(cv, betterLongTableBits) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) candidateL = e.longTable[nextHashL] coffsetL = candidateL.offset - e.cur @@ -413,8 +415,8 @@ encodeLoop: } // Try to find a better match by searching for a long match at the end of the current best match - if true && s+matched < sLimit { - nextHashL := hash8(load6432(src, s+matched), betterLongTableBits) + if s+matched < sLimit { + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) cv := load3232(src, s) candidateL := e.longTable[nextHashL] coffsetL := candidateL.offset - e.cur - matched @@ -495,10 +497,10 @@ encodeLoop: for index0 < s-1 { cv0 := load6432(src, index0) cv1 := cv0 >> 8 - h0 := hash8(cv0, betterLongTableBits) + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} index0 += 2 } @@ -516,8 +518,8 @@ encodeLoop: } // Store this, since we have it. - nextHashS := hash5(cv, betterShortTableBits) - nextHashL := hash8(cv, betterLongTableBits) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) // We have at least 4 byte match. // No need to check backwards. 
We come straight from a match @@ -672,8 +674,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS := hash5(cv, betterShortTableBits) - nextHashL := hash8(cv, betterLongTableBits) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -734,11 +736,11 @@ encodeLoop: for index0 < s-1 { cv0 := load6432(src, index0) cv1 := cv0 >> 8 - h0 := hash8(cv0, betterLongTableBits) + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.markLongShardDirty(h0) - h1 := hash5(cv1, betterShortTableBits) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} e.markShortShardDirty(h1) index0 += 2 @@ -798,11 +800,11 @@ encodeLoop: for index0 < s-1 { cv0 := load6432(src, index0) cv1 := cv0 >> 8 - h0 := hash8(cv0, betterLongTableBits) + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.markLongShardDirty(h0) - h1 := hash5(cv1, betterShortTableBits) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} e.markShortShardDirty(h1) index0 += 2 @@ -879,7 +881,7 @@ encodeLoop: // See if we can find a long match at s+1 const checkAt = 1 cv := load6432(src, s+checkAt) - nextHashL = hash8(cv, betterLongTableBits) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) candidateL = e.longTable[nextHashL] coffsetL = candidateL.offset - e.cur @@ -940,7 +942,7 @@ encodeLoop: } // Try to find a better match by searching for a long match at the end of the current best match if s+matched < sLimit { - nextHashL := hash8(load6432(src, s+matched), betterLongTableBits) + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) cv := load3232(src, s) candidateL := e.longTable[nextHashL] coffsetL := candidateL.offset - e.cur - matched @@ -1021,11 +1023,11 @@ encodeLoop: for index0 < s-1 { cv0 := load6432(src, index0) cv1 := cv0 >> 8 - h0 := hash8(cv0, betterLongTableBits) + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.markLongShardDirty(h0) - h1 := hash5(cv1, betterShortTableBits) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} e.markShortShardDirty(h1) index0 += 2 @@ -1045,8 +1047,8 @@ encodeLoop: } // Store this, since we have it. - nextHashS := hash5(cv, betterShortTableBits) - nextHashL := hash8(cv, betterLongTableBits) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) // We have at least 4 byte match. // No need to check backwards. 
We come straight from a match @@ -1113,10 +1115,10 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { const hashLog = betterShortTableBits cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hash5(cv, hashLog) // 0 -> 4 - nextHash1 := hash5(cv>>8, hashLog) // 1 -> 5 - nextHash2 := hash5(cv>>16, hashLog) // 2 -> 6 - nextHash3 := hash5(cv>>24, hashLog) // 3 -> 7 + nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7 e.dictTable[nextHash] = tableEntry{ val: uint32(cv), offset: i, @@ -1145,7 +1147,7 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { } if len(d.content) >= 8 { cv := load6432(d.content, 0) - h := hash8(cv, betterLongTableBits) + h := hashLen(cv, betterLongTableBits, betterLongLen) e.dictLongTable[h] = prevEntry{ offset: e.maxMatchOff, prev: e.dictLongTable[h].offset, @@ -1155,7 +1157,7 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { off := 8 // First to read for i := e.maxMatchOff + 1; i < end; i++ { cv = cv>>8 | (uint64(d.content[off]) << 56) - h := hash8(cv, betterLongTableBits) + h := hashLen(cv, betterLongTableBits, betterLongLen) e.dictLongTable[h] = prevEntry{ offset: i, prev: e.dictLongTable[h].offset, diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index 96b21b90e8..d6b3104240 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -10,6 +10,7 @@ const ( dFastLongTableBits = 17 // Bits used in the long match table dFastLongTableSize = 1 << dFastLongTableBits // Size of the table dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + dFastLongLen = 8 // Bytes used for table hash dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard @@ -17,6 +18,8 @@ const ( dFastShortTableBits = tableBits // Bits used in the short match table dFastShortTableSize = 1 << dFastShortTableBits // Size of the table dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. 
+ dFastShortLen = 5 // Bytes used for table hash + ) type doubleFastEncoder struct { @@ -124,8 +127,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS := hash5(cv, dFastShortTableBits) - nextHashL := hash8(cv, dFastLongTableBits) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -208,7 +211,7 @@ encodeLoop: // See if we can find a long match at s+1 const checkAt = 1 cv := load6432(src, s+checkAt) - nextHashL = hash8(cv, dFastLongTableBits) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) candidateL = e.longTable[nextHashL] coffsetL = s - (candidateL.offset - e.cur) + checkAt @@ -304,16 +307,16 @@ encodeLoop: cv1 := load6432(src, index1) te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.longTable[hash8(cv0, dFastLongTableBits)] = te0 - e.longTable[hash8(cv1, dFastLongTableBits)] = te1 + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 cv0 >>= 8 cv1 >>= 8 te0.offset++ te1.offset++ te0.val = uint32(cv0) te1.val = uint32(cv1) - e.table[hash5(cv0, dFastShortTableBits)] = te0 - e.table[hash5(cv1, dFastShortTableBits)] = te1 + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 cv = load6432(src, s) @@ -330,8 +333,8 @@ encodeLoop: } // Store this, since we have it. - nextHashS := hash5(cv, dFastShortTableBits) - nextHashL := hash8(cv, dFastLongTableBits) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) // We have at least 4 byte match. // No need to check backwards. We come straight from a match @@ -436,8 +439,8 @@ encodeLoop: var t int32 for { - nextHashS := hash5(cv, dFastShortTableBits) - nextHashL := hash8(cv, dFastLongTableBits) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -521,7 +524,7 @@ encodeLoop: // See if we can find a long match at s+1 const checkAt = 1 cv := load6432(src, s+checkAt) - nextHashL = hash8(cv, dFastLongTableBits) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) candidateL = e.longTable[nextHashL] coffsetL = s - (candidateL.offset - e.cur) + checkAt @@ -614,16 +617,16 @@ encodeLoop: cv1 := load6432(src, index1) te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.longTable[hash8(cv0, dFastLongTableBits)] = te0 - e.longTable[hash8(cv1, dFastLongTableBits)] = te1 + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 cv0 >>= 8 cv1 >>= 8 te0.offset++ te1.offset++ te0.val = uint32(cv0) te1.val = uint32(cv1) - e.table[hash5(cv0, dFastShortTableBits)] = te0 - e.table[hash5(cv1, dFastShortTableBits)] = te1 + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 cv = load6432(src, s) @@ -640,8 +643,8 @@ encodeLoop: } // Store this, since we have it. 
- nextHashS := hash5(cv1>>8, dFastShortTableBits) - nextHashL := hash8(cv, dFastLongTableBits) + nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) // We have at least 4 byte match. // No need to check backwards. We come straight from a match @@ -782,8 +785,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS := hash5(cv, dFastShortTableBits) - nextHashL := hash8(cv, dFastLongTableBits) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -868,7 +871,7 @@ encodeLoop: // See if we can find a long match at s+1 const checkAt = 1 cv := load6432(src, s+checkAt) - nextHashL = hash8(cv, dFastLongTableBits) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) candidateL = e.longTable[nextHashL] coffsetL = s - (candidateL.offset - e.cur) + checkAt @@ -965,8 +968,8 @@ encodeLoop: cv1 := load6432(src, index1) te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - longHash1 := hash8(cv0, dFastLongTableBits) - longHash2 := hash8(cv0, dFastLongTableBits) + longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) + longHash2 := hashLen(cv0, dFastLongTableBits, dFastLongLen) e.longTable[longHash1] = te0 e.longTable[longHash2] = te1 e.markLongShardDirty(longHash1) @@ -977,8 +980,8 @@ encodeLoop: te1.offset++ te0.val = uint32(cv0) te1.val = uint32(cv1) - hashVal1 := hash5(cv0, dFastShortTableBits) - hashVal2 := hash5(cv1, dFastShortTableBits) + hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen) + hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen) e.table[hashVal1] = te0 e.markShardDirty(hashVal1) e.table[hashVal2] = te1 @@ -999,8 +1002,8 @@ encodeLoop: } // Store this, since we have it. - nextHashS := hash5(cv, dFastShortTableBits) - nextHashL := hash8(cv, dFastLongTableBits) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) // We have at least 4 byte match. // No need to check backwards. We come straight from a match @@ -1071,14 +1074,14 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { } if len(d.content) >= 8 { cv := load6432(d.content, 0) - e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{ + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ val: uint32(cv), offset: e.maxMatchOff, } end := int32(len(d.content)) - 8 + e.maxMatchOff for i := e.maxMatchOff + 1; i < end; i++ { cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) - e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{ + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ val: uint32(cv), offset: i, } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index 2246d286dc..f2502629bc 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -11,12 +11,13 @@ import ( ) const ( - tableBits = 15 // Bits used in the table - tableSize = 1 << tableBits // Size of the table - tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table - tableShardSize = tableSize / tableShardCnt // Size of an individual shard - tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. 
- maxMatchLength = 131074 + tableBits = 15 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table + tableShardSize = tableSize / tableShardCnt // Size of an individual shard + tableFastHashLen = 6 + tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + maxMatchLength = 131074 ) type tableEntry struct { @@ -122,8 +123,8 @@ encodeLoop: panic("offset0 was 0") } - nextHash := hash6(cv, hashLog) - nextHash2 := hash6(cv>>8, hashLog) + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) candidate := e.table[nextHash] candidate2 := e.table[nextHash2] repIndex := s - offset1 + 2 @@ -301,7 +302,7 @@ encodeLoop: } // Store this, since we have it. - nextHash := hash6(cv, hashLog) + nextHash := hashLen(cv, hashLog, tableFastHashLen) e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} seq.matchLen = uint32(l) - zstdMinMatch seq.litLen = 0 @@ -405,8 +406,8 @@ encodeLoop: // By not using them for the first 3 matches for { - nextHash := hash6(cv, hashLog) - nextHash2 := hash6(cv>>8, hashLog) + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) candidate := e.table[nextHash] candidate2 := e.table[nextHash2] repIndex := s - offset1 + 2 @@ -589,7 +590,7 @@ encodeLoop: } // Store this, since we have it. - nextHash := hash6(cv, hashLog) + nextHash := hashLen(cv, hashLog, tableFastHashLen) e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} seq.matchLen = uint32(l) - zstdMinMatch seq.litLen = 0 @@ -715,8 +716,8 @@ encodeLoop: panic("offset0 was 0") } - nextHash := hash6(cv, hashLog) - nextHash2 := hash6(cv>>8, hashLog) + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) candidate := e.table[nextHash] candidate2 := e.table[nextHash2] repIndex := s - offset1 + 2 @@ -896,7 +897,7 @@ encodeLoop: } // Store this, since we have it. 
- nextHash := hash6(cv, hashLog) + nextHash := hashLen(cv, hashLog, tableFastHashLen) e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} e.markShardDirty(nextHash) seq.matchLen = uint32(l) - zstdMinMatch @@ -957,9 +958,9 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { const hashLog = tableBits cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hash6(cv, hashLog) // 0 -> 5 - nextHash1 := hash6(cv>>8, hashLog) // 1 -> 6 - nextHash2 := hash6(cv>>16, hashLog) // 2 -> 7 + nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 5 + nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 6 + nextHash2 := hashLen(cv>>16, hashLog, tableFastHashLen) // 2 -> 7 e.dictTable[nextHash] = tableEntry{ val: uint32(cv), offset: i, diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index ea85548fc9..e6e315969b 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -33,7 +33,7 @@ type encoder interface { Block() *blockEnc CRC() *xxhash.Digest AppendCRC([]byte) []byte - WindowSize(size int) int32 + WindowSize(size int64) int32 UseBlock(*blockEnc) Reset(d *dict, singleBlock bool) } @@ -48,6 +48,8 @@ type encoderState struct { err error writeErr error nWritten int64 + nInput int64 + frameContentSize int64 headerWritten bool eofWritten bool fullFrameWritten bool @@ -120,7 +122,21 @@ func (e *Encoder) Reset(w io.Writer) { s.w = w s.err = nil s.nWritten = 0 + s.nInput = 0 s.writeErr = nil + s.frameContentSize = 0 +} + +// ResetContentSize will reset and set a content size for the next stream. +// If the bytes written does not match the size given an error will be returned +// when calling Close(). +// This is removed when Reset is called. +// Sizes <= 0 results in no content size set. +func (e *Encoder) ResetContentSize(w io.Writer, size int64) { + e.Reset(w) + if size >= 0 { + e.state.frameContentSize = size + } } // Write data to the encoder. @@ -190,6 +206,7 @@ func (e *Encoder) nextBlock(final bool) error { return s.err } s.nWritten += int64(n2) + s.nInput += int64(len(s.filling)) s.current = s.current[:0] s.filling = s.filling[:0] s.headerWritten = true @@ -200,8 +217,8 @@ func (e *Encoder) nextBlock(final bool) error { var tmp [maxHeaderSize]byte fh := frameHeader{ - ContentSize: 0, - WindowSize: uint32(s.encoder.WindowSize(0)), + ContentSize: uint64(s.frameContentSize), + WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)), SingleSegment: false, Checksum: e.o.crc, DictID: e.o.dict.ID(), @@ -243,6 +260,7 @@ func (e *Encoder) nextBlock(final bool) error { // Move blocks forward. 
s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current + s.nInput += int64(len(s.current)) s.wg.Add(1) go func(src []byte) { if debugEncoder { @@ -394,6 +412,11 @@ func (e *Encoder) Close() error { if err != nil { return err } + if s.frameContentSize > 0 { + if s.nInput != s.frameContentSize { + return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput) + } + } if e.state.fullFrameWritten { return s.err } @@ -470,7 +493,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } fh := frameHeader{ ContentSize: uint64(len(src)), - WindowSize: uint32(enc.WindowSize(len(src))), + WindowSize: uint32(enc.WindowSize(int64(len(src)))), SingleSegment: single, Checksum: e.o.crc, DictID: e.o.dict.ID(), diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 16d4ab63c1..7d29e1d689 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -189,7 +189,7 @@ func EncoderLevelFromZstd(level int) EncoderLevel { case level >= 6 && level < 10: return SpeedBetterCompression case level >= 10: - return SpeedBetterCompression + return SpeedBestCompression } return SpeedDefault } diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index e8cc9a2c22..989c79f8c3 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -22,10 +22,6 @@ type frameDec struct { WindowSize uint64 - // maxWindowSize is the maximum windows size to support. - // should never be bigger than max-int. - maxWindowSize uint64 - // In order queue of blocks being decoded. decoding chan *blockDec @@ -50,8 +46,11 @@ type frameDec struct { } const ( - // The minimum Window_Size is 1 KB. + // MinWindowSize is the minimum Window Size, which is 1 KB. MinWindowSize = 1 << 10 + + // MaxWindowSize is the maximum encoder window size + // and the default decoder maximum window size. MaxWindowSize = 1 << 29 ) @@ -61,12 +60,11 @@ var ( ) func newFrameDec(o decoderOptions) *frameDec { - d := frameDec{ - o: o, - maxWindowSize: MaxWindowSize, + if o.maxWindowSize > o.maxDecodedSize { + o.maxWindowSize = o.maxDecodedSize } - if d.maxWindowSize > o.maxDecodedSize { - d.maxWindowSize = o.maxDecodedSize + d := frameDec{ + o: o, } return &d } @@ -251,13 +249,17 @@ func (d *frameDec) reset(br byteBuffer) error { } } - if d.WindowSize > d.maxWindowSize { - printf("window size %d > max %d\n", d.WindowSize, d.maxWindowSize) + if d.WindowSize > uint64(d.o.maxWindowSize) { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } return ErrWindowSizeExceeded } // The minimum Window_Size is 1 KB. if d.WindowSize < MinWindowSize { - println("got window size: ", d.WindowSize) + if debugDecoder { + println("got window size: ", d.WindowSize) + } return ErrWindowSizeTooSmall } d.history.windowSize = int(d.WindowSize) @@ -352,8 +354,8 @@ func (d *frameDec) checkCRC() error { func (d *frameDec) initAsync() { if !d.o.lowMem && !d.SingleSegment { - // set max extra size history to 10MB. - d.history.maxSize = d.history.windowSize + maxBlockSize*5 + // set max extra size history to 2MB. + d.history.maxSize = d.history.windowSize + maxBlockSize } // re-alloc if more than one extra block size. 
if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize { diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go index 4a752067fc..cf33f29a1b 100644 --- a/vendor/github.com/klauspost/compress/zstd/hash.go +++ b/vendor/github.com/klauspost/compress/zstd/hash.go @@ -13,24 +13,24 @@ const ( prime8bytes = 0xcf1bbcdcb7a56463 ) -// hashLen returns a hash of the lowest l bytes of u for a size size of h bytes. -// l must be >=4 and <=8. Any other value will return hash for 4 bytes. -// h should always be <32. -// Preferably h and l should be a constant. -// FIXME: This does NOT get resolved, if 'mls' is constant, -// so this cannot be used. -func hashLen(u uint64, hashLog, mls uint8) uint32 { +// hashLen returns a hash of the lowest mls bytes of with length output bits. +// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. +// length should always be < 32. +// Preferably length and mls should be a constant for inlining. +func hashLen(u uint64, length, mls uint8) uint32 { switch mls { + case 3: + return (uint32(u<<8) * prime3bytes) >> (32 - length) case 5: - return hash5(u, hashLog) + return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) case 6: - return hash6(u, hashLog) + return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) case 7: - return hash7(u, hashLog) + return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) case 8: - return hash8(u, hashLog) + return uint32((u * prime8bytes) >> (64 - length)) default: - return hash4x64(u, hashLog) + return (uint32(u) * prime4bytes) >> (32 - length) } } @@ -39,39 +39,3 @@ func hashLen(u uint64, hashLog, mls uint8) uint32 { func hash3(u uint32, h uint8) uint32 { return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31) } - -// hash4 returns the hash of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. -func hash4(u uint32, h uint8) uint32 { - return (u * prime4bytes) >> ((32 - h) & 31) -} - -// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. -func hash4x64(u uint64, h uint8) uint32 { - return (uint32(u) * prime4bytes) >> ((32 - h) & 31) -} - -// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. -func hash5(u uint64, h uint8) uint32 { - return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63)) -} - -// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. -func hash6(u uint64, h uint8) uint32 { - return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63)) -} - -// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. -func hash7(u uint64, h uint8) uint32 { - return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63)) -} - -// hash8 returns the hash of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. 
-func hash8(u uint64, h uint8) uint32 { - return uint32((u * prime8bytes) >> ((64 - h) & 63)) -} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go index 426b9cac78..2c112a0ab1 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -195,7 +195,6 @@ func (d *Digest) UnmarshalBinary(b []byte) error { b, d.v4 = consumeUint64(b) b, d.total = consumeUint64(b) copy(d.mem[:], b) - b = b[len(d.mem):] d.n = int(d.total % uint64(len(d.mem))) return nil } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go index 35318d7c46..0ae847f75b 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go @@ -1,6 +1,5 @@ -// +build !appengine -// +build gc -// +build !purego +//go:build !appengine && gc && !purego +// +build !appengine,gc,!purego package xxhash @@ -10,4 +9,4 @@ package xxhash func Sum64(b []byte) uint64 //go:noescape -func writeBlocks(*Digest, []byte) int +func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s index 2c9c5357a1..be8db5bf79 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -6,7 +6,7 @@ // Register allocation: // AX h -// CX pointer to advance through b +// SI pointer to advance through b // DX n // BX loop end // R8 v1, k1 @@ -16,39 +16,39 @@ // R12 tmp // R13 prime1v // R14 prime2v -// R15 prime4v +// DI prime4v -// round reads from and advances the buffer pointer in CX. +// round reads from and advances the buffer pointer in SI. // It assumes that R13 has prime1v and R14 has prime2v. #define round(r) \ - MOVQ (CX), R12 \ - ADDQ $8, CX \ + MOVQ (SI), R12 \ + ADDQ $8, SI \ IMULQ R14, R12 \ ADDQ R12, r \ ROLQ $31, r \ IMULQ R13, r // mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. +// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. #define mergeRound(acc, val) \ IMULQ R14, val \ ROLQ $31, val \ IMULQ R13, val \ XORQ val, acc \ IMULQ R13, acc \ - ADDQ R15, acc + ADDQ DI, acc // func Sum64(b []byte) uint64 TEXT ·Sum64(SB), NOSPLIT, $0-32 // Load fixed primes. MOVQ ·prime1v(SB), R13 MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), R15 + MOVQ ·prime4v(SB), DI // Load slice. - MOVQ b_base+0(FP), CX + MOVQ b_base+0(FP), SI MOVQ b_len+8(FP), DX - LEAQ (CX)(DX*1), BX + LEAQ (SI)(DX*1), BX // The first loop limit will be len(b)-32. SUBQ $32, BX @@ -65,14 +65,14 @@ TEXT ·Sum64(SB), NOSPLIT, $0-32 XORQ R11, R11 SUBQ R13, R11 - // Loop until CX > BX. + // Loop until SI > BX. blockLoop: round(R8) round(R9) round(R10) round(R11) - CMPQ CX, BX + CMPQ SI, BX JLE blockLoop MOVQ R8, AX @@ -100,16 +100,16 @@ noBlocks: afterBlocks: ADDQ DX, AX - // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. + // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. ADDQ $24, BX - CMPQ CX, BX + CMPQ SI, BX JG fourByte wordLoop: // Calculate k1. 
- MOVQ (CX), R8 - ADDQ $8, CX + MOVQ (SI), R8 + ADDQ $8, SI IMULQ R14, R8 ROLQ $31, R8 IMULQ R13, R8 @@ -117,18 +117,18 @@ wordLoop: XORQ R8, AX ROLQ $27, AX IMULQ R13, AX - ADDQ R15, AX + ADDQ DI, AX - CMPQ CX, BX + CMPQ SI, BX JLE wordLoop fourByte: ADDQ $4, BX - CMPQ CX, BX + CMPQ SI, BX JG singles - MOVL (CX), R8 - ADDQ $4, CX + MOVL (SI), R8 + ADDQ $4, SI IMULQ R13, R8 XORQ R8, AX @@ -138,19 +138,19 @@ fourByte: singles: ADDQ $4, BX - CMPQ CX, BX + CMPQ SI, BX JGE finalize singlesLoop: - MOVBQZX (CX), R12 - ADDQ $1, CX + MOVBQZX (SI), R12 + ADDQ $1, SI IMULQ ·prime5v(SB), R12 XORQ R12, AX ROLQ $11, AX IMULQ R13, AX - CMPQ CX, BX + CMPQ SI, BX JL singlesLoop finalize: @@ -179,13 +179,13 @@ TEXT ·writeBlocks(SB), NOSPLIT, $0-40 MOVQ ·prime2v(SB), R14 // Load slice. - MOVQ arg1_base+8(FP), CX - MOVQ arg1_len+16(FP), DX - LEAQ (CX)(DX*1), BX + MOVQ b_base+8(FP), SI + MOVQ b_len+16(FP), DX + LEAQ (SI)(DX*1), BX SUBQ $32, BX // Load vN from d. - MOVQ arg+0(FP), AX + MOVQ d+0(FP), AX MOVQ 0(AX), R8 // v1 MOVQ 8(AX), R9 // v2 MOVQ 16(AX), R10 // v3 @@ -199,7 +199,7 @@ blockLoop: round(R10) round(R11) - CMPQ CX, BX + CMPQ SI, BX JLE blockLoop // Copy vN back to d. @@ -208,8 +208,8 @@ blockLoop: MOVQ R10, 16(AX) MOVQ R11, 24(AX) - // The number of bytes written is CX minus the old base pointer. - SUBQ arg1_base+8(FP), CX - MOVQ CX, ret+32(FP) + // The number of bytes written is SI minus the old base pointer. + SUBQ b_base+8(FP), SI + MOVQ SI, ret+32(FP) RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go index 4a5a821603..1f52f296e7 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -1,3 +1,4 @@ +//go:build !amd64 || appengine || !gc || purego // +build !amd64 appengine !gc purego package xxhash diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go index 0372b1714a..9e1baad73b 100644 --- a/vendor/github.com/klauspost/compress/zstd/snappy.go +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -10,8 +10,8 @@ import ( "hash/crc32" "io" - "github.com/golang/snappy" "github.com/klauspost/compress/huff0" + snappy "github.com/klauspost/compress/internal/snapref" ) const ( diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go index 9325b928ae..967f29b312 100644 --- a/vendor/github.com/klauspost/compress/zstd/zip.go +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -64,8 +64,9 @@ func (r *pooledZipReader) Close() error { } type pooledZipWriter struct { - mu sync.Mutex // guards Close and Read - enc *Encoder + mu sync.Mutex // guards Close and Read + enc *Encoder + pool *sync.Pool } func (w *pooledZipWriter) Write(p []byte) (n int, err error) { @@ -83,7 +84,7 @@ func (w *pooledZipWriter) Close() error { var err error if w.enc != nil { err = w.enc.Close() - zipReaderPool.Put(w.enc) + w.pool.Put(w.enc) w.enc = nil } return err @@ -104,7 +105,7 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { return nil, err } } - return &pooledZipWriter{enc: enc}, nil + return &pooledZipWriter{enc: enc, pool: &pool}, nil } } diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md index 1955f2878c..9fe803a5e9 100644 --- 
a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md +++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md @@ -1,6 +1,12 @@ -## unreleased +## 1.4.2 -* Fix regression where `*time.Time` value would be set to empty and not be sent +* Custom name matchers to support any sort of casing, formatting, etc. for + field names. [GH-250] +* Fix possible panic in ComposeDecodeHookFunc [GH-251] + +## 1.4.1 + +* Fix regression where `*time.Time` value would be set to empty and not be sent to decode hooks properly [GH-232] ## 1.4.0 diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go index 92e6f76fff..4d4bbc733b 100644 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -62,7 +62,8 @@ func DecodeHookExec( func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { return func(f reflect.Value, t reflect.Value) (interface{}, error) { var err error - var data interface{} + data := f.Interface() + newFrom := f for _, f1 := range fs { data, err = DecodeHookExec(f1, newFrom, t) diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go index 3643901f55..dcee0f2d63 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -192,7 +192,7 @@ type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface // source and target types. type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) -// DecodeHookFuncRaw is a DecodeHookFunc which has complete access to both the source and target +// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target // values. type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error) @@ -258,6 +258,11 @@ type DecoderConfig struct { // The tag name that mapstructure reads for field names. This // defaults to "mapstructure" TagName string + + // MatchName is the function used to match the map key to the struct + // field name or tag. Defaults to `strings.EqualFold`. This can be used + // to implement case-sensitive tag values, support snake casing, etc. + MatchName func(mapKey, fieldName string) bool } // A Decoder takes a raw interface value and turns it into structured @@ -376,6 +381,10 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) { config.TagName = "mapstructure" } + if config.MatchName == nil { + config.MatchName = strings.EqualFold + } + result := &Decoder{ config: config, } @@ -1340,7 +1349,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e continue } - if strings.EqualFold(mK, fieldName) { + if d.config.MatchName(mK, fieldName) { rawMapKey = dataValKey rawMapVal = dataVal.MapIndex(dataValKey) break diff --git a/vendor/github.com/openzipkin/zipkin-go/model/annotation.go b/vendor/github.com/openzipkin/zipkin-go/model/annotation.go index 795bc4143f..7511bd69b7 100644 --- a/vendor/github.com/openzipkin/zipkin-go/model/annotation.go +++ b/vendor/github.com/openzipkin/zipkin-go/model/annotation.go @@ -1,4 +1,4 @@ -// Copyright 2019 The OpenZipkin Authors +// Copyright 2021 The OpenZipkin Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
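The mapstructure bump above (1.4.1 to 1.4.2) adds the DecoderConfig.MatchName hook, which defaults to strings.EqualFold. A minimal sketch of how a consumer could override it for exact, case-sensitive key matching; the Person struct and input map are invented for illustration and are not code from this repository:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Person struct {
	Name string
	Age  int
}

func main() {
	var out Person
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result: &out,
		// Default is strings.EqualFold; require exact matches instead.
		MatchName: func(mapKey, fieldName string) bool { return mapKey == fieldName },
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(map[string]interface{}{"Name": "Ada", "age": 36}); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out) // {Name:Ada Age:0}: lowercase "age" no longer matches "Age"
}
```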
diff --git a/vendor/github.com/openzipkin/zipkin-go/model/doc.go b/vendor/github.com/openzipkin/zipkin-go/model/doc.go index 1b11b4df79..72c695c1f5 100644 --- a/vendor/github.com/openzipkin/zipkin-go/model/doc.go +++ b/vendor/github.com/openzipkin/zipkin-go/model/doc.go @@ -1,4 +1,4 @@ -// Copyright 2019 The OpenZipkin Authors +// Copyright 2021 The OpenZipkin Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/openzipkin/zipkin-go/model/endpoint.go b/vendor/github.com/openzipkin/zipkin-go/model/endpoint.go index edb01eb75e..619eea78b7 100644 --- a/vendor/github.com/openzipkin/zipkin-go/model/endpoint.go +++ b/vendor/github.com/openzipkin/zipkin-go/model/endpoint.go @@ -1,4 +1,4 @@ -// Copyright 2019 The OpenZipkin Authors +// Copyright 2021 The OpenZipkin Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/openzipkin/zipkin-go/model/kind.go b/vendor/github.com/openzipkin/zipkin-go/model/kind.go index 5d512ad90f..06f16226c4 100644 --- a/vendor/github.com/openzipkin/zipkin-go/model/kind.go +++ b/vendor/github.com/openzipkin/zipkin-go/model/kind.go @@ -1,4 +1,4 @@ -// Copyright 2019 The OpenZipkin Authors +// Copyright 2021 The OpenZipkin Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/openzipkin/zipkin-go/model/span.go b/vendor/github.com/openzipkin/zipkin-go/model/span.go index 4eddfbb191..78467aba80 100644 --- a/vendor/github.com/openzipkin/zipkin-go/model/span.go +++ b/vendor/github.com/openzipkin/zipkin-go/model/span.go @@ -1,4 +1,4 @@ -// Copyright 2019 The OpenZipkin Authors +// Copyright 2021 The OpenZipkin Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/openzipkin/zipkin-go/model/span_id.go b/vendor/github.com/openzipkin/zipkin-go/model/span_id.go index 452dc871b2..56d6f5dbcc 100644 --- a/vendor/github.com/openzipkin/zipkin-go/model/span_id.go +++ b/vendor/github.com/openzipkin/zipkin-go/model/span_id.go @@ -1,4 +1,4 @@ -// Copyright 2019 The OpenZipkin Authors +// Copyright 2021 The OpenZipkin Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/openzipkin/zipkin-go/model/traceid.go b/vendor/github.com/openzipkin/zipkin-go/model/traceid.go index 68d12d386c..2f29a00111 100644 --- a/vendor/github.com/openzipkin/zipkin-go/model/traceid.go +++ b/vendor/github.com/openzipkin/zipkin-go/model/traceid.go @@ -1,4 +1,4 @@ -// Copyright 2019 The OpenZipkin Authors +// Copyright 2021 The OpenZipkin Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
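For context on the zstd decoder changes earlier in this patch (decoder_options.go and framedec.go): the new WithDecoderMaxWindow option lets callers reject frames that declare an oversized window before any allocation happens, independently of WithDecoderMaxMemory. A hedged usage sketch, assuming a placeholder input file; it is not code from this repository:

```go
package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	f, err := os.Open("payload.zst") // hypothetical input file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Reject frames declaring a window larger than 8 MiB and cap total
	// decoded memory, useful when decoding potentially hostile content.
	dec, err := zstd.NewReader(f,
		zstd.WithDecoderMaxWindow(8<<20),
		zstd.WithDecoderMaxMemory(64<<20),
	)
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	if _, err := io.Copy(io.Discard, dec); err != nil {
		panic(err)
	}
}
```

Per the doc comment in the patch, if WithDecoderMaxMemory is set lower than the window limit, the lower value wins.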
diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go index 6a60a9a6a5..f066d56305 100644 --- a/vendor/github.com/pierrec/lz4/writer.go +++ b/vendor/github.com/pierrec/lz4/writer.go @@ -84,10 +84,8 @@ func (z *Writer) WithConcurrency(n int) *Writer { z.err = err } } - if isCompressed := res.size&compressedBlockFlag == 0; isCompressed { - // It is now safe to release the buffer as no longer in use by any goroutine. - putBuffer(cap(res.data), res.data) - } + // It is now safe to release the buffer as no longer in use by any goroutine. + putBuffer(cap(res.data), res.data) if h := z.OnBlockDone; h != nil { h(n) } @@ -231,7 +229,12 @@ func (z *Writer) compressBlock(data []byte) error { if z.c != nil { c := make(chan zResult) z.c <- c // Send now to guarantee order - go writerCompressBlock(c, z.Header, data) + + // get a buffer from the pool and copy the data over + block := getBuffer(z.Header.BlockMaxSize)[:len(data)] + copy(block, data) + + go writerCompressBlock(c, z.Header, block) return nil } @@ -299,7 +302,9 @@ func (z *Writer) Flush() error { return nil } - data := z.data[:z.idx] + data := getBuffer(z.Header.BlockMaxSize)[:len(z.data[:z.idx])] + copy(data, z.data[:z.idx]) + z.idx = 0 if z.c == nil { return z.compressBlock(data) @@ -402,9 +407,13 @@ func writerCompressBlock(c chan zResult, header Header, data []byte) { if zn > 0 && zn < len(data) { res.size = uint32(zn) res.data = zdata[:zn] + // release the uncompressed block since it is not used anymore + putBuffer(header.BlockMaxSize, data) } else { res.size = uint32(len(data)) | compressedBlockFlag res.data = data + // release the compressed block since it was not used + putBuffer(header.BlockMaxSize, zdata) } if header.BlockChecksum { res.checksum = xxh32.ChecksumZero(res.data) diff --git a/vendor/github.com/pierrec/lz4/writer_legacy.go b/vendor/github.com/pierrec/lz4/writer_legacy.go new file mode 100644 index 0000000000..ca8dc8c7f0 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/writer_legacy.go @@ -0,0 +1,182 @@ +package lz4 + +import ( + "encoding/binary" + "io" +) + +// WriterLegacy implements the LZ4Demo frame decoder. +type WriterLegacy struct { + Header + // Handler called when a block has been successfully read. + // It provides the number of bytes read. + OnBlockDone func(size int) + + dst io.Writer // Destination. + data []byte // Data to be compressed + buffer for compressed data. + idx int // Index into data. + hashtable [winSize]int // Hash table used in CompressBlock(). +} + +// NewWriterLegacy returns a new LZ4 encoder for the legacy frame format. +// No access to the underlying io.Writer is performed. +// The supplied Header is checked at the first Write. +// It is ok to change it before the first Write but then not until a Reset() is performed. +func NewWriterLegacy(dst io.Writer) *WriterLegacy { + z := new(WriterLegacy) + z.Reset(dst) + return z +} + +// Write compresses data from the supplied buffer into the underlying io.Writer. +// Write does not return until the data has been written. +func (z *WriterLegacy) Write(buf []byte) (int, error) { + if !z.Header.done { + if err := z.writeHeader(); err != nil { + return 0, err + } + } + if debugFlag { + debug("input buffer len=%d index=%d", len(buf), z.idx) + } + + zn := len(z.data) + var n int + for len(buf) > 0 { + if z.idx == 0 && len(buf) >= zn { + // Avoid a copy as there is enough data for a block. 
+ if err := z.compressBlock(buf[:zn]); err != nil { + return n, err + } + n += zn + buf = buf[zn:] + continue + } + // Accumulate the data to be compressed. + m := copy(z.data[z.idx:], buf) + n += m + z.idx += m + buf = buf[m:] + if debugFlag { + debug("%d bytes copied to buf, current index %d", n, z.idx) + } + + if z.idx < len(z.data) { + // Buffer not filled. + if debugFlag { + debug("need more data for compression") + } + return n, nil + } + + // Buffer full. + if err := z.compressBlock(z.data); err != nil { + return n, err + } + z.idx = 0 + } + + return n, nil +} + +// writeHeader builds and writes the header to the underlying io.Writer. +func (z *WriterLegacy) writeHeader() error { + // Legacy has fixed 8MB blocksizes + // https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame + bSize := 2 * blockSize4M + + buf := make([]byte, 2*bSize, 2*bSize) + z.data = buf[:bSize] // Uncompressed buffer is the first half. + + z.idx = 0 + + // Header consists of one mageic number, write it out. + if err := binary.Write(z.dst, binary.LittleEndian, frameMagicLegacy); err != nil { + return err + } + z.Header.done = true + if debugFlag { + debug("wrote header %v", z.Header) + } + + return nil +} + +// compressBlock compresses a block. +func (z *WriterLegacy) compressBlock(data []byte) error { + bSize := 2 * blockSize4M + zdata := z.data[bSize:cap(z.data)] + // The compressed block size cannot exceed the input's. + var zn int + + if level := z.Header.CompressionLevel; level != 0 { + zn, _ = CompressBlockHC(data, zdata, level) + } else { + zn, _ = CompressBlock(data, zdata, z.hashtable[:]) + } + + if debugFlag { + debug("block compression %d => %d", len(data), zn) + } + zdata = zdata[:zn] + + // Write the block. + if err := binary.Write(z.dst, binary.LittleEndian, uint32(zn)); err != nil { + return err + } + written, err := z.dst.Write(zdata) + if err != nil { + return err + } + if h := z.OnBlockDone; h != nil { + h(written) + } + return nil +} + +// Flush flushes any pending compressed data to the underlying writer. +// Flush does not return until the data has been written. +// If the underlying writer returns an error, Flush returns that error. +func (z *WriterLegacy) Flush() error { + if debugFlag { + debug("flush with index %d", z.idx) + } + if z.idx == 0 { + return nil + } + + data := z.data[:z.idx] + z.idx = 0 + return z.compressBlock(data) +} + +// Close closes the WriterLegacy, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer. +func (z *WriterLegacy) Close() error { + if !z.Header.done { + if err := z.writeHeader(); err != nil { + return err + } + } + if err := z.Flush(); err != nil { + return err + } + + if debugFlag { + debug("writing last empty block") + } + + return nil +} + +// Reset clears the state of the WriterLegacy z such that it is equivalent to its +// initial state from NewWriterLegacy, but instead writing to w. +// No access to the underlying io.Writer is performed. +func (z *WriterLegacy) Reset(w io.Writer) { + z.Header.Reset() + z.dst = w + z.idx = 0 + // reset hashtable to ensure deterministic output. 
+ for i := range z.hashtable { + z.hashtable[i] = 0 + } +} diff --git a/vendor/github.com/spf13/cast/.travis.yml b/vendor/github.com/spf13/cast/.travis.yml deleted file mode 100644 index 833a48799e..0000000000 --- a/vendor/github.com/spf13/cast/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -language: go -env: - - GO111MODULE=on -sudo: required -go: - - "1.11.x" - - "1.12.x" - - tip -os: - - linux -matrix: - allow_failures: - - go: tip - fast_finish: true -script: - - make check diff --git a/vendor/github.com/spf13/cast/README.md b/vendor/github.com/spf13/cast/README.md index e6939397dd..120a573426 100644 --- a/vendor/github.com/spf13/cast/README.md +++ b/vendor/github.com/spf13/cast/README.md @@ -1,7 +1,7 @@ cast ==== [![GoDoc](https://godoc.org/github.com/spf13/cast?status.svg)](https://godoc.org/github.com/spf13/cast) -[![Build Status](https://api.travis-ci.org/spf13/cast.svg?branch=master)](https://travis-ci.org/spf13/cast) +[![Build Status](https://github.com/spf13/cast/actions/workflows/go.yml/badge.svg)](https://github.com/spf13/cast/actions/workflows/go.yml) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast)](https://goreportcard.com/report/github.com/spf13/cast) Easy and safe casting from one type to another in Go diff --git a/vendor/github.com/spf13/cast/cast.go b/vendor/github.com/spf13/cast/cast.go index 9fba638d46..0cfe9418de 100644 --- a/vendor/github.com/spf13/cast/cast.go +++ b/vendor/github.com/spf13/cast/cast.go @@ -20,6 +20,11 @@ func ToTime(i interface{}) time.Time { return v } +func ToTimeInDefaultLocation(i interface{}, location *time.Location) time.Time { + v, _ := ToTimeInDefaultLocationE(i, location) + return v +} + // ToDuration casts an interface to a time.Duration type. func ToDuration(i interface{}) time.Duration { v, _ := ToDurationE(i) diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go index 70c7291bed..c04af6a974 100644 --- a/vendor/github.com/spf13/cast/caste.go +++ b/vendor/github.com/spf13/cast/caste.go @@ -20,13 +20,20 @@ var errNegativeNotAllowed = errors.New("unable to cast negative value") // ToTimeE casts an interface to a time.Time type. func ToTimeE(i interface{}) (tim time.Time, err error) { + return ToTimeInDefaultLocationE(i, time.UTC) +} + +// ToTimeInDefaultLocationE casts an empty interface to time.Time, +// interpreting inputs without a timezone to be in the given location, +// or the local timezone if nil. 
+func ToTimeInDefaultLocationE(i interface{}, location *time.Location) (tim time.Time, err error) { i = indirect(i) switch v := i.(type) { case time.Time: return v, nil case string: - return StringToDate(v) + return StringToDateInDefaultLocation(v, location) case int: return time.Unix(int64(v), 0), nil case int64: @@ -1129,8 +1136,43 @@ func ToStringSliceE(i interface{}) ([]string, error) { return a, nil case []string: return v, nil + case []int8: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []int: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []int32: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []int64: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []float32: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []float64: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil case string: return strings.Fields(v), nil + case []error: + for _, err := range i.([]error) { + a = append(a, err.Error()) + } + return a, nil case interface{}: str, err := ToStringE(v) if err != nil { @@ -1204,37 +1246,83 @@ func ToDurationSliceE(i interface{}) ([]time.Duration, error) { // predefined list of formats. If no suitable format is found, an error is // returned. func StringToDate(s string) (time.Time, error) { - return parseDateWith(s, []string{ - time.RFC3339, - "2006-01-02T15:04:05", // iso8601 without timezone - time.RFC1123Z, - time.RFC1123, - time.RFC822Z, - time.RFC822, - time.RFC850, - time.ANSIC, - time.UnixDate, - time.RubyDate, - "2006-01-02 15:04:05.999999999 -0700 MST", // Time.String() - "2006-01-02", - "02 Jan 2006", - "2006-01-02T15:04:05-0700", // RFC3339 without timezone hh:mm colon - "2006-01-02 15:04:05 -07:00", - "2006-01-02 15:04:05 -0700", - "2006-01-02 15:04:05Z07:00", // RFC3339 without T - "2006-01-02 15:04:05Z0700", // RFC3339 without T or timezone hh:mm colon - "2006-01-02 15:04:05", - time.Kitchen, - time.Stamp, - time.StampMilli, - time.StampMicro, - time.StampNano, - }) + return parseDateWith(s, time.UTC, timeFormats) +} + +// StringToDateInDefaultLocation casts an empty interface to a time.Time, +// interpreting inputs without a timezone to be in the given location, +// or the local timezone if nil. 
+func StringToDateInDefaultLocation(s string, location *time.Location) (time.Time, error) { + return parseDateWith(s, location, timeFormats) } -func parseDateWith(s string, dates []string) (d time.Time, e error) { - for _, dateType := range dates { - if d, e = time.Parse(dateType, s); e == nil { +type timeFormatType int + +const ( + timeFormatNoTimezone timeFormatType = iota + timeFormatNamedTimezone + timeFormatNumericTimezone + timeFormatNumericAndNamedTimezone + timeFormatTimeOnly +) + +type timeFormat struct { + format string + typ timeFormatType +} + +func (f timeFormat) hasTimezone() bool { + // We don't include the formats with only named timezones, see + // https://github.com/golang/go/issues/19694#issuecomment-289103522 + return f.typ >= timeFormatNumericTimezone && f.typ <= timeFormatNumericAndNamedTimezone +} + +var ( + timeFormats = []timeFormat{ + timeFormat{time.RFC3339, timeFormatNumericTimezone}, + timeFormat{"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone + timeFormat{time.RFC1123Z, timeFormatNumericTimezone}, + timeFormat{time.RFC1123, timeFormatNamedTimezone}, + timeFormat{time.RFC822Z, timeFormatNumericTimezone}, + timeFormat{time.RFC822, timeFormatNamedTimezone}, + timeFormat{time.RFC850, timeFormatNamedTimezone}, + timeFormat{"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String() + timeFormat{"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon + timeFormat{"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon + timeFormat{"2006-01-02 15:04:05", timeFormatNoTimezone}, + timeFormat{time.ANSIC, timeFormatNoTimezone}, + timeFormat{time.UnixDate, timeFormatNamedTimezone}, + timeFormat{time.RubyDate, timeFormatNumericTimezone}, + timeFormat{"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone}, + timeFormat{"2006-01-02", timeFormatNoTimezone}, + timeFormat{"02 Jan 2006", timeFormatNoTimezone}, + timeFormat{"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone}, + timeFormat{"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone}, + timeFormat{time.Kitchen, timeFormatTimeOnly}, + timeFormat{time.Stamp, timeFormatTimeOnly}, + timeFormat{time.StampMilli, timeFormatTimeOnly}, + timeFormat{time.StampMicro, timeFormatTimeOnly}, + timeFormat{time.StampNano, timeFormatTimeOnly}, + } +) + +func parseDateWith(s string, location *time.Location, formats []timeFormat) (d time.Time, e error) { + + for _, format := range formats { + if d, e = time.Parse(format.format, s); e == nil { + + // Some time formats have a zone name, but no offset, so it gets + // put in that zone name (not the default one passed in to us), but + // without that zone's offset. So set the location manually. + if format.typ <= timeFormatNamedTimezone { + if location == nil { + location = time.Local + } + year, month, day := d.Date() + hour, min, sec := d.Clock() + d = time.Date(year, month, day, hour, min, sec, d.Nanosecond(), location) + } + return } } diff --git a/vendor/github.com/spf13/cast/timeformattype_string.go b/vendor/github.com/spf13/cast/timeformattype_string.go new file mode 100644 index 0000000000..1524fc82ce --- /dev/null +++ b/vendor/github.com/spf13/cast/timeformattype_string.go @@ -0,0 +1,27 @@ +// Code generated by "stringer -type timeFormatType"; DO NOT EDIT. + +package cast + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. 
+ // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[timeFormatNoTimezone-0] + _ = x[timeFormatNamedTimezone-1] + _ = x[timeFormatNumericTimezone-2] + _ = x[timeFormatNumericAndNamedTimezone-3] + _ = x[timeFormatTimeOnly-4] +} + +const _timeFormatType_name = "timeFormatNoTimezonetimeFormatNamedTimezonetimeFormatNumericTimezonetimeFormatNumericAndNamedTimezonetimeFormatTimeOnly" + +var _timeFormatType_index = [...]uint8{0, 20, 43, 68, 101, 119} + +func (i timeFormatType) String() string { + if i < 0 || i >= timeFormatType(len(_timeFormatType_index)-1) { + return "timeFormatType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _timeFormatType_name[_timeFormatType_index[i]:_timeFormatType_index[i+1]] +} diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md index f409b15192..9712e70514 100644 --- a/vendor/github.com/spf13/viper/README.md +++ b/vendor/github.com/spf13/viper/README.md @@ -127,11 +127,11 @@ You can handle the specific case where no config file is found like this: ```go if err := viper.ReadInConfig(); err != nil { - if _, ok := err.(viper.ConfigFileNotFoundError); ok { - // Config file not found; ignore error if desired - } else { - // Config file was found but another error was produced - } + if _, ok := err.(viper.ConfigFileNotFoundError); ok { + // Config file not found; ignore error if desired + } else { + // Config file was found but another error was produced + } } // Config file found and successfully parsed @@ -175,10 +175,10 @@ Optionally you can provide a function for Viper to run each time a change occurs **Make sure you add all of the configPaths prior to calling `WatchConfig()`** ```go -viper.WatchConfig() viper.OnConfigChange(func(e fsnotify.Event) { fmt.Println("Config file changed:", e.Name) }) +viper.WatchConfig() ``` ### Reading Config from io.Reader @@ -354,7 +354,7 @@ func main() { i := viper.GetInt("flagname") // retrieve value from viper - ... + // ... } ``` @@ -503,18 +503,18 @@ runtime_viper.Unmarshal(&runtime_conf) // open a goroutine to watch remote changes forever go func(){ for { - time.Sleep(time.Second * 5) // delay after each request - - // currently, only tested with etcd support - err := runtime_viper.WatchRemoteConfig() - if err != nil { - log.Errorf("unable to read remote config: %v", err) - continue - } - - // unmarshal new config into our runtime config struct. you can also use channel - // to implement a signal to notify the system of the changes - runtime_viper.Unmarshal(&runtime_conf) + time.Sleep(time.Second * 5) // delay after each request + + // currently, only tested with etcd support + err := runtime_viper.WatchRemoteConfig() + if err != nil { + log.Errorf("unable to read remote config: %v", err) + continue + } + + // unmarshal new config into our runtime config struct. 
you can also use channel + // to implement a signal to notify the system of the changes + runtime_viper.Unmarshal(&runtime_conf) } }() ``` @@ -546,7 +546,7 @@ Example: ```go viper.GetString("logfile") // case-insensitive Setting & Getting if viper.GetBool("verbose") { - fmt.Println("verbose enabled") + fmt.Println("verbose enabled") } ``` ### Accessing nested keys @@ -669,7 +669,7 @@ So instead of doing that let's pass a Viper instance to the constructor that rep ```go cache1Config := viper.Sub("cache.cache1") if cache1Config == nil { // Sub returns nil if the key cannot be found - panic("cache configuration not found") + panic("cache configuration not found") } cache1 := NewCache(cache1Config) @@ -681,10 +681,10 @@ Internally, the `NewCache` function can address `max-items` and `item-size` keys ```go func NewCache(v *Viper) *Cache { - return &Cache{ - MaxItems: v.GetInt("max-items"), - ItemSize: v.GetInt("item-size"), - } + return &Cache{ + MaxItems: v.GetInt("max-items"), + ItemSize: v.GetInt("item-size"), + } } ``` @@ -726,18 +726,18 @@ you have to change the delimiter: v := viper.NewWithOptions(viper.KeyDelimiter("::")) v.SetDefault("chart::values", map[string]interface{}{ - "ingress": map[string]interface{}{ - "annotations": map[string]interface{}{ - "traefik.frontend.rule.type": "PathPrefix", - "traefik.ingress.kubernetes.io/ssl-redirect": "true", - }, - }, + "ingress": map[string]interface{}{ + "annotations": map[string]interface{}{ + "traefik.frontend.rule.type": "PathPrefix", + "traefik.ingress.kubernetes.io/ssl-redirect": "true", + }, + }, }) type config struct { Chart struct{ - Values map[string]interface{} - } + Values map[string]interface{} + } } var C config @@ -778,6 +778,15 @@ if err != nil { Viper uses [github.com/mitchellh/mapstructure](https://github.com/mitchellh/mapstructure) under the hood for unmarshaling values which uses `mapstructure` tags by default. +### Decoding custom formats + +A frequently requested feature for Viper is adding more value formats and decoders. +For example, parsing character (dot, comma, semicolon, etc) separated strings into slices. + +This is already available in Viper using mapstructure decode hooks. + +Read more about the details in [this blog post](https://sagikazarmark.hu/blog/decoding-custom-formats-with-viper/). + ### Marshalling to string You may need to marshal all the settings held in viper into a string rather than write them to a file. @@ -785,17 +794,17 @@ You can use your favorite format's marshaller with the config returned by `AllSe ```go import ( - yaml "gopkg.in/yaml.v2" - // ... + yaml "gopkg.in/yaml.v2" + // ... 
) func yamlStringSettings() string { - c := viper.AllSettings() - bs, err := yaml.Marshal(c) - if err != nil { - log.Fatalf("unable to marshal config to YAML: %v", err) - } - return string(bs) + c := viper.AllSettings() + bs, err := yaml.Marshal(c) + if err != nil { + log.Fatalf("unable to marshal config to YAML: %v", err) + } + return string(bs) } ``` diff --git a/vendor/github.com/spf13/viper/go.mod b/vendor/github.com/spf13/viper/go.mod index 145e0a100f..fcc1a5d923 100644 --- a/vendor/github.com/spf13/viper/go.mod +++ b/vendor/github.com/spf13/viper/go.mod @@ -3,19 +3,18 @@ module github.com/spf13/viper go 1.12 require ( - github.com/bketelsen/crypt v0.0.4 - github.com/fsnotify/fsnotify v1.4.9 + github.com/fsnotify/fsnotify v1.5.1 github.com/hashicorp/hcl v1.0.0 github.com/magiconair/properties v1.8.5 - github.com/mitchellh/mapstructure v1.4.1 - github.com/pelletier/go-toml v1.9.3 - github.com/smartystreets/goconvey v1.6.4 // indirect + github.com/mitchellh/mapstructure v1.4.2 + github.com/pelletier/go-toml v1.9.4 + github.com/sagikazarmark/crypt v0.1.0 github.com/spf13/afero v1.6.0 - github.com/spf13/cast v1.3.1 + github.com/spf13/cast v1.4.1 github.com/spf13/jwalterweatherman v1.1.0 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.7.0 github.com/subosito/gotenv v1.2.0 - gopkg.in/ini.v1 v1.62.0 + gopkg.in/ini.v1 v1.63.2 gopkg.in/yaml.v2 v2.4.0 ) diff --git a/vendor/github.com/spf13/viper/go.sum b/vendor/github.com/spf13/viper/go.sum index 27730e2aa8..3e0a13ea16 100644 --- a/vendor/github.com/spf13/viper/go.sum +++ b/vendor/github.com/spf13/viper/go.sum @@ -17,8 +17,13 @@ cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKP cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3 h1:wPBktZFzYBcCZVARvwVKqH1uEj+aLXofJEtrb4oOsio= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -27,8 +32,8 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0 h1:9x7Bx0A9R5/M9jibeJeZWqjeVEIxYW9fZYqB9a70/bY= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/firestore v1.6.0 h1:dMIWvm+3O0E3DM7kcZPH0FBQ94Xg/OMkdTNDaY9itbI= +cloud.google.com/go/firestore 
v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -41,15 +46,16 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.4 h1:w/jqZtC9YD4DS/Vp9GhWfWcCpuAL58oTnLoI8vE9YHU= -github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -57,6 +63,7 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= @@ -69,10 +76,13 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= 
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -92,6 +102,7 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -110,6 +121,7 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -123,12 +135,14 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod 
h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -140,82 +154,91 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/googleapis/gax-go/v2 v2.1.0 h1:6DWmvNpomjL1+3liNSZbVns3zsYzzCjm6pRBO1tLeso= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.1.0 h1:BNQPM9ytxj6jbjjdRPioQ94T6YXriSopn0i8COv6SRA= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1 h1:LnuDWGNsoajlhGyHJvuWW6FVqRl8JOTPqS6CPTsYjhY= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/api v1.10.1 h1:MwZJp86nlnL+6+W1Zly4JUuVn9YHhMggBirMpHGD7kw= +github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= +github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU= +github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.12.0 h1:d4QkX8FRTYaKaCZBoXYY8zJX2BXjWxurN/GA2tkrmZM= +github.com/hashicorp/go-hclog 
v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= +github.com/hashicorp/memberlist v0.2.2 h1:5+RffWKwqJ71YPu9mWsF7ZOscZmwfasdA8kbdC7AO2g= +github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/serf v0.9.5 h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM= +github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/json-iterator/go v1.1.11 
h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/miekg/dns v1.1.26 h1:gPxPSwALAeHJSjarOs00QjVdV9QoBvc1D2ujQUr5BzU= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= github.com/mitchellh/go-testing-interface 
v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo= +github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= @@ -223,32 +246,33 @@ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ= -github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= +github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sagikazarmark/crypt v0.1.0 h1:AyO7PGna28P9TMH93Bsxd7m9QC4xE6zyGQTXCo7ZrA8= +github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= 
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= +github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -278,6 +302,7 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= @@ -286,9 +311,11 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -312,7 +339,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -324,12 +350,10 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -340,6 +364,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -361,8 +386,9 @@ golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net 
v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420 h1:a8jGStKg0XqKDlKqjLrXn0ioF5MH36pT7Z0BRTqLhbk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -374,8 +400,11 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602 h1:0Ja1LBD+yisY6RWM/BH7TJVXWsSjs2VwBSmvSX4HdBc= -golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -392,6 +421,7 @@ golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -399,12 +429,17 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -428,8 +463,16 @@ golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf h1:2ucpDCmfkl8Bd/FsLtiD653Wf96cW37s+iGx93zsu4k= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -437,8 +480,9 @@ golang.org/x/text 
v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -448,7 +492,6 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -456,9 +499,9 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -492,8 +535,11 @@ golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -520,8 +566,13 @@ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34q google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.44.0 h1:URs6qR1lAxDsqWITsQXI4ZkGiYJ5dHtRNiCpfs2OeKA= -google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.56.0 h1:08F9XVYTLOGeSQb3xI9C0gXMuQanhdGed0cWFhDozbI= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -570,8 +621,19 @@ google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= 
+google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71 h1:z+ErRPu0+KS02Td3fOAgdX+lnPDh/VyaABEJPD4JRQs= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -591,8 +653,14 @@ google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA5 google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -604,14 +672,16 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= -gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.63.2 h1:tGK/CyBg7SMzb60vP1M03vNZ3VDu3wGQJwn7Sxi9r3c= +gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/spf13/viper/internal/encoding/decoder.go b/vendor/github.com/spf13/viper/internal/encoding/decoder.go new file mode 100644 index 0000000000..08b1bb66b5 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/decoder.go @@ -0,0 +1,61 @@ +package encoding + +import ( + "sync" +) + +// Decoder decodes the contents of b into a v representation. +// It's primarily used for decoding contents of a file into a map[string]interface{}. +type Decoder interface { + Decode(b []byte, v interface{}) error +} + +const ( + // ErrDecoderNotFound is returned when there is no decoder registered for a format. + ErrDecoderNotFound = encodingError("decoder not found for this format") + + // ErrDecoderFormatAlreadyRegistered is returned when an decoder is already registered for a format. + ErrDecoderFormatAlreadyRegistered = encodingError("decoder already registered for this format") +) + +// DecoderRegistry can choose an appropriate Decoder based on the provided format. +type DecoderRegistry struct { + decoders map[string]Decoder + + mu sync.RWMutex +} + +// NewDecoderRegistry returns a new, initialized DecoderRegistry. +func NewDecoderRegistry() *DecoderRegistry { + return &DecoderRegistry{ + decoders: make(map[string]Decoder), + } +} + +// RegisterDecoder registers a Decoder for a format. +// Registering a Decoder for an already existing format is not supported. +func (e *DecoderRegistry) RegisterDecoder(format string, enc Decoder) error { + e.mu.Lock() + defer e.mu.Unlock() + + if _, ok := e.decoders[format]; ok { + return ErrDecoderFormatAlreadyRegistered + } + + e.decoders[format] = enc + + return nil +} + +// Decode calls the underlying Decoder based on the format. +func (e *DecoderRegistry) Decode(format string, b []byte, v interface{}) error { + e.mu.RLock() + decoder, ok := e.decoders[format] + e.mu.RUnlock() + + if !ok { + return ErrDecoderNotFound + } + + return decoder.Decode(b, v) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/encoder.go b/vendor/github.com/spf13/viper/internal/encoding/encoder.go new file mode 100644 index 0000000000..82c7996cbb --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/encoder.go @@ -0,0 +1,60 @@ +package encoding + +import ( + "sync" +) + +// Encoder encodes the contents of v into a byte representation. +// It's primarily used for encoding a map[string]interface{} into a file format. +type Encoder interface { + Encode(v interface{}) ([]byte, error) +} + +const ( + // ErrEncoderNotFound is returned when there is no encoder registered for a format. + ErrEncoderNotFound = encodingError("encoder not found for this format") + + // ErrEncoderFormatAlreadyRegistered is returned when an encoder is already registered for a format. 
+ ErrEncoderFormatAlreadyRegistered = encodingError("encoder already registered for this format") +) + +// EncoderRegistry can choose an appropriate Encoder based on the provided format. +type EncoderRegistry struct { + encoders map[string]Encoder + + mu sync.RWMutex +} + +// NewEncoderRegistry returns a new, initialized EncoderRegistry. +func NewEncoderRegistry() *EncoderRegistry { + return &EncoderRegistry{ + encoders: make(map[string]Encoder), + } +} + +// RegisterEncoder registers an Encoder for a format. +// Registering a Encoder for an already existing format is not supported. +func (e *EncoderRegistry) RegisterEncoder(format string, enc Encoder) error { + e.mu.Lock() + defer e.mu.Unlock() + + if _, ok := e.encoders[format]; ok { + return ErrEncoderFormatAlreadyRegistered + } + + e.encoders[format] = enc + + return nil +} + +func (e *EncoderRegistry) Encode(format string, v interface{}) ([]byte, error) { + e.mu.RLock() + encoder, ok := e.encoders[format] + e.mu.RUnlock() + + if !ok { + return nil, ErrEncoderNotFound + } + + return encoder.Encode(v) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/error.go b/vendor/github.com/spf13/viper/internal/encoding/error.go new file mode 100644 index 0000000000..e4cde02d7b --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/error.go @@ -0,0 +1,7 @@ +package encoding + +type encodingError string + +func (e encodingError) Error() string { + return string(e) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go b/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go new file mode 100644 index 0000000000..f3e4ab1228 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go @@ -0,0 +1,40 @@ +package hcl + +import ( + "bytes" + "encoding/json" + + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/printer" +) + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for HCL encoding. +// TODO: add printer config to the codec? +type Codec struct{} + +func (Codec) Encode(v interface{}) ([]byte, error) { + b, err := json.Marshal(v) + if err != nil { + return nil, err + } + + // TODO: use printer.Format? Is the trailing newline an issue? + + ast, err := hcl.Parse(string(b)) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + + err = printer.Fprint(&buf, ast.Node) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (Codec) Decode(b []byte, v interface{}) error { + return hcl.Unmarshal(b, v) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/json/codec.go b/vendor/github.com/spf13/viper/internal/encoding/json/codec.go new file mode 100644 index 0000000000..dff9ec9824 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/json/codec.go @@ -0,0 +1,17 @@ +package json + +import ( + "encoding/json" +) + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for JSON encoding. +type Codec struct{} + +func (Codec) Encode(v interface{}) ([]byte, error) { + // TODO: expose prefix and indent in the Codec as setting? 
+ return json.MarshalIndent(v, "", " ") +} + +func (Codec) Decode(b []byte, v interface{}) error { + return json.Unmarshal(b, v) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go b/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go new file mode 100644 index 0000000000..c043802b91 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go @@ -0,0 +1,45 @@ +package toml + +import ( + "github.com/pelletier/go-toml" +) + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for TOML encoding. +type Codec struct{} + +func (Codec) Encode(v interface{}) ([]byte, error) { + if m, ok := v.(map[string]interface{}); ok { + t, err := toml.TreeFromMap(m) + if err != nil { + return nil, err + } + + s, err := t.ToTomlString() + if err != nil { + return nil, err + } + + return []byte(s), nil + } + + return toml.Marshal(v) +} + +func (Codec) Decode(b []byte, v interface{}) error { + tree, err := toml.LoadBytes(b) + if err != nil { + return err + } + + if m, ok := v.(*map[string]interface{}); ok { + vmap := *m + tmap := tree.ToMap() + for k, v := range tmap { + vmap[k] = v + } + + return nil + } + + return tree.Unmarshal(v) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go b/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go new file mode 100644 index 0000000000..f94b269966 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go @@ -0,0 +1,14 @@ +package yaml + +import "gopkg.in/yaml.v2" + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for YAML encoding. +type Codec struct{} + +func (Codec) Encode(v interface{}) ([]byte, error) { + return yaml.Marshal(v) +} + +func (Codec) Decode(b []byte, v interface{}) error { + return yaml.Unmarshal(b, v) +} diff --git a/vendor/github.com/spf13/viper/util.go b/vendor/github.com/spf13/viper/util.go index cee6b24296..09d051a221 100644 --- a/vendor/github.com/spf13/viper/util.go +++ b/vendor/github.com/spf13/viper/util.go @@ -95,19 +95,7 @@ func absPathify(inPath string) string { inPath = userHomeDir() + inPath[5:] } - if strings.HasPrefix(inPath, "$") { - end := strings.Index(inPath, string(os.PathSeparator)) - - var value, suffix string - if end == -1 { - value = os.Getenv(inPath[1:]) - } else { - value = os.Getenv(inPath[1:end]) - suffix = inPath[end:] - } - - inPath = value + suffix - } + inPath = os.ExpandEnv(inPath) if filepath.IsAbs(inPath) { return filepath.Clean(inPath) diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go index e8c04627bb..9e2e3537f6 100644 --- a/vendor/github.com/spf13/viper/viper.go +++ b/vendor/github.com/spf13/viper/viper.go @@ -22,7 +22,6 @@ package viper import ( "bytes" "encoding/csv" - "encoding/json" "errors" "fmt" "io" @@ -36,18 +35,20 @@ import ( "time" "github.com/fsnotify/fsnotify" - "github.com/hashicorp/hcl" - "github.com/hashicorp/hcl/hcl/printer" "github.com/magiconair/properties" "github.com/mitchellh/mapstructure" - "github.com/pelletier/go-toml" "github.com/spf13/afero" "github.com/spf13/cast" jww "github.com/spf13/jwalterweatherman" "github.com/spf13/pflag" "github.com/subosito/gotenv" "gopkg.in/ini.v1" - "gopkg.in/yaml.v2" + + "github.com/spf13/viper/internal/encoding" + "github.com/spf13/viper/internal/encoding/hcl" + "github.com/spf13/viper/internal/encoding/json" + "github.com/spf13/viper/internal/encoding/toml" + "github.com/spf13/viper/internal/encoding/yaml" ) // ConfigMarshalError happens when failing to 
marshal the configuration. @@ -67,8 +68,47 @@ type RemoteResponse struct { Error error } +var ( + encoderRegistry = encoding.NewEncoderRegistry() + decoderRegistry = encoding.NewDecoderRegistry() +) + func init() { v = New() + + { + codec := yaml.Codec{} + + encoderRegistry.RegisterEncoder("yaml", codec) + decoderRegistry.RegisterDecoder("yaml", codec) + + encoderRegistry.RegisterEncoder("yml", codec) + decoderRegistry.RegisterDecoder("yml", codec) + } + + { + codec := json.Codec{} + + encoderRegistry.RegisterEncoder("json", codec) + decoderRegistry.RegisterDecoder("json", codec) + } + + { + codec := toml.Codec{} + + encoderRegistry.RegisterEncoder("toml", codec) + decoderRegistry.RegisterDecoder("toml", codec) + } + + { + codec := hcl.Codec{} + + encoderRegistry.RegisterEncoder("hcl", codec) + decoderRegistry.RegisterDecoder("hcl", codec) + + encoderRegistry.RegisterEncoder("tfvars", codec) + decoderRegistry.RegisterDecoder("tfvars", codec) + } } type remoteConfigFactory interface { @@ -292,7 +332,7 @@ func NewWithOptions(opts ...Option) *Viper { // can use it in their testing as well. func Reset() { v = New() - SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "dotenv", "env", "ini"} + SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "tfvars", "dotenv", "env", "ini"} SupportedRemoteProviders = []string{"etcd", "consul", "firestore"} } @@ -331,7 +371,7 @@ type RemoteProvider interface { } // SupportedExts are universally supported extensions. -var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "dotenv", "env", "ini"} +var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "tfvars", "dotenv", "env", "ini"} // SupportedRemoteProviders are universally supported remote providers. var SupportedRemoteProviders = []string{"etcd", "consul", "firestore"} @@ -1367,11 +1407,13 @@ func (v *Viper) realKey(key string) string { func InConfig(key string) bool { return v.InConfig(key) } func (v *Viper) InConfig(key string) bool { + lcaseKey := strings.ToLower(key) + // if the requested key is an alias, then return the proper key - key = v.realKey(key) + lcaseKey = v.realKey(lcaseKey) + path := strings.Split(lcaseKey, v.keyDelim) - _, exists := v.config[key] - return exists + return v.searchIndexableWithPathPrefixes(v.config, path) != nil } // SetDefault sets the default value for this key. 
@@ -1542,7 +1584,7 @@ func (v *Viper) writeConfig(filename string, force bool) error { var configType string ext := filepath.Ext(filename) - if ext != "" { + if ext != "" && ext != filepath.Base(filename) { configType = ext[1:] } else { configType = v.configType @@ -1584,35 +1626,12 @@ func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error { buf := new(bytes.Buffer) buf.ReadFrom(in) - switch strings.ToLower(v.getConfigType()) { - case "yaml", "yml": - if err := yaml.Unmarshal(buf.Bytes(), &c); err != nil { - return ConfigParseError{err} - } - - case "json": - if err := json.Unmarshal(buf.Bytes(), &c); err != nil { - return ConfigParseError{err} - } - - case "hcl": - obj, err := hcl.Parse(buf.String()) - if err != nil { - return ConfigParseError{err} - } - if err = hcl.DecodeObject(&c, obj); err != nil { - return ConfigParseError{err} - } - - case "toml": - tree, err := toml.LoadReader(buf) + switch format := strings.ToLower(v.getConfigType()); format { + case "yaml", "yml", "json", "toml", "hcl", "tfvars": + err := decoderRegistry.Decode(format, buf.Bytes(), &c) if err != nil { return ConfigParseError{err} } - tmap := tree.ToMap() - for k, v := range tmap { - c[k] = v - } case "dotenv", "env": env, err := gotenv.StrictParse(buf) @@ -1665,26 +1684,13 @@ func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error { func (v *Viper) marshalWriter(f afero.File, configType string) error { c := v.AllSettings() switch configType { - case "json": - b, err := json.MarshalIndent(c, "", " ") - if err != nil { - return ConfigMarshalError{err} - } - _, err = f.WriteString(string(b)) + case "yaml", "yml", "json", "toml", "hcl", "tfvars": + b, err := encoderRegistry.Encode(configType, c) if err != nil { return ConfigMarshalError{err} } - case "hcl": - b, err := json.Marshal(c) - if err != nil { - return ConfigMarshalError{err} - } - ast, err := hcl.Parse(string(b)) - if err != nil { - return ConfigMarshalError{err} - } - err = printer.Fprint(f, ast.Node) + _, err = f.WriteString(string(b)) if err != nil { return ConfigMarshalError{err} } @@ -1717,25 +1723,6 @@ func (v *Viper) marshalWriter(f afero.File, configType string) error { return ConfigMarshalError{err} } - case "toml": - t, err := toml.TreeFromMap(c) - if err != nil { - return ConfigMarshalError{err} - } - s := t.String() - if _, err := f.WriteString(s); err != nil { - return ConfigMarshalError{err} - } - - case "yaml", "yml": - b, err := yaml.Marshal(c) - if err != nil { - return ConfigMarshalError{err} - } - if _, err = f.WriteString(string(b)); err != nil { - return ConfigMarshalError{err} - } - case "ini": keys := v.AllKeys() cfg := ini.Empty() diff --git a/vendor/github.com/thediveo/enumflag/LICENSE b/vendor/github.com/thediveo/enumflag/LICENSE new file mode 100644 index 0000000000..d9a10c0d8e --- /dev/null +++ b/vendor/github.com/thediveo/enumflag/LICENSE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/thediveo/enumflag/README.md b/vendor/github.com/thediveo/enumflag/README.md new file mode 100644 index 0000000000..cf9e833bee --- /dev/null +++ b/vendor/github.com/thediveo/enumflag/README.md @@ -0,0 +1,166 @@ +# CLI Enumeration Flags + +[![GoDoc](https://godoc.org/github.com/thediveo/enumflag?status.svg)](http://godoc.org/github.com/thediveo/enumflag) +[![GitHub](https://img.shields.io/github/license/thediveo/enumflag)](https://img.shields.io/github/license/thediveo/enumflag) +![build and test](https://github.com/thediveo/enumflag/workflows/build%20and%20test/badge.svg?branch=master) +[![Go Report Card](https://goreportcard.com/badge/github.com/thediveo/enumflag)](https://goreportcard.com/report/github.com/thediveo/enumflag) +![Coverage](https://img.shields.io/badge/coverage-135.72%25-darkcyan) + +`enumflag` is a Golang package which supplements the Golang CLI flag packages +[spf13/cobra](https://github.com/spf13/cobra) and +[spf13/pflag](https://github.com/spf13/pflag) with enumeration flags, including +support for enumeration slices. + +For instance, users can specify enum flags as `--mode=foo` or `--mode=bar`, +where `foo` and `bar` are valid enumeration values. 
Other values which are not +part of the set of allowed enumeration values cannot be set and raise CLI flag +errors. In case of an enumeration _slice_ flag users can specify multiple +enumeration values either with a single flag `--mode=foo,bar` or multiple flag +calls, such as `--mode=foo --mode=bar`. + +Application programmers then simply deal with enumeration values in form of +uints (or ints), liberated from parsing strings and validating enumeration +flags. + +## Alternatives + +In case you are just interested in string-based one-of-a-set flags, then the +following packages offer you a minimalist approach: + +- [hashicorp/packer/helper/enumflag](https://godoc.org/github.com/hashicorp/packer/helper/enumflag) + really is a reduced-to-the-max version without any whistles and bells. + +- [creachadair/goflags/enumflag](https://godoc.org/github.com/creachadair/goflags/enumflag) + has a similar, but slightly more elaborate API with additional "indices" for + enumeration values. + +But if you instead want to handle one-of-a-set flags as properly typed +enumerations instead of strings, or if you need (multiple-of-a-set) slice +support, then please read on. + +## How To Use: Properly Typed Enum Flag + +Without further ado, here's how to define and use enum flags in your own +applications... + +```go +import ( + "fmt" + + "github.com/spf13/cobra" + "github.com/thediveo/enumflag" +) + +// ① Define your new enum flag type. It can be derived from enumflag.Flag, but +// it doesn't need to be as long as it is compatible with enumflag.Flag, so +// either an int or uint. +type FooMode enumflag.Flag + +// ② Define the enumeration values for FooMode. +const ( + Foo FooMode = iota + Bar +) + +// ③ Map enumeration values to their textual representations (value +// identifiers). +var FooModeIds = map[FooMode][]string{ + Foo: {"foo"}, + Bar: {"bar"}, +} + +// ④ Now use the FooMode enum flag. +var foomode FooMode + +func main() { + rootCmd := &cobra.Command{ + Run: func(cmd *cobra.Command, _ []string) { + fmt.Printf("mode is: %d=%q\n", + foomode, + cmd.PersistentFlags().Lookup("mode").Value.String()) + }, + } + // ⑤ Define the CLI flag parameters for your wrapped enum flag. + rootCmd.PersistentFlags().VarP( + enumflag.New(&foomode, "mode", FooModeIds, enumflag.EnumCaseInsensitive), + "mode", "m", + "foos the output; can be 'foo' or 'bar'") + + rootCmd.SetArgs([]string{"--mode", "bAr"}) + _ = rootCmd.Execute() +} +``` + +The boilerplate pattern is always the same: + +1. Define your own new enumeration type, such as `type FooMode enumflag.Flag`. +2. Define the constants in your enumeration. +3. Define the mapping of the constants onto enum values (textual + representations). +4. Somewhere, declare a flag variable of your enum flag type. +5. Wire up your flag variable to its flag long and short names, et cetera. + +## How To Use: Slice of Enums + +For a slice of enumerations, simply declare your variable to be a slice of your +enumeration type and then use `enumflag.NewSlice(...)` instead of +`enumflag.New(...)`. + +```go +import ( + "fmt" + + "github.com/spf13/cobra" + "github.com/thediveo/enumflag" +) + +// ① Define your new enum flag type. It can be derived from enumflag.Flag, but +// it doesn't need to be as long as it is compatible with enumflag.Flag, so +// either an int or uint. +type MooMode enumflag.Flag + +// ② Define the enumeration values for FooMode. +const ( + Moo MooMode = (iota + 1) * 111 + Møø + Mimimi +) + +// ③ Map enumeration values to their textual representations (value +// identifiers). 
+var MooModeIds = map[MooMode][]string{ + Moo: {"moo"}, + Møø: {"møø"}, + Mimimi: {"mimimi"}, +} + +// User-defined enum flag types should be derived from "enumflag.Flag"; however +// this is not strictly necessary as long as they can be converted into the +// "enumflag.Flag" type. Actually, "enumflag.Flag" is just a fancy name for an +// "uint". In order to use such user-defined enum flags as flag slices, simply +// wrap them using enumflag.NewSlice. +func Example_slice() { + // ④ Define your enum slice flag value. + var moomode []MooMode + rootCmd := &cobra.Command{ + Run: func(cmd *cobra.Command, _ []string) { + fmt.Printf("mode is: %d=%q\n", + moomode, + cmd.PersistentFlags().Lookup("mode").Value.String()) + }, + } + // ⑤ Define the CLI flag parameters for your wrapped enumm slice flag. + rootCmd.PersistentFlags().VarP( + enumflag.NewSlice(&moomode, "mode", MooModeIds, enumflag.EnumCaseInsensitive), + "mode", "m", + "can be any combination of 'moo', 'møø', 'mimimi'") + + rootCmd.SetArgs([]string{"--mode", "Moo,møø"}) + _ = rootCmd.Execute() +} +``` + +## Copyright and License + +`lxkns` is Copyright 2020 Harald Albrecht, and licensed under the Apache +License, Version 2.0. diff --git a/vendor/github.com/thediveo/enumflag/doc.go b/vendor/github.com/thediveo/enumflag/doc.go new file mode 100644 index 0000000000..37405e7935 --- /dev/null +++ b/vendor/github.com/thediveo/enumflag/doc.go @@ -0,0 +1,16 @@ +/* + +Package enumflag supplements the Golang CLI flag handling packages spf13/cobra +and spf13/pflag with enumeration flags. + +For instance, users can specify enum flags as "--mode=foo" or "--mode=bar", +where "foo" and "bar" are valid enumeration values. Other values which are not +part of the set of allowed enumeration values cannot be set and raise CLI flag +errors. + +Application programmers then simply deal with enumeration values in form of +uints (or ints), liberated from parsing strings and validating enumeration +flags. + +*/ +package enumflag diff --git a/vendor/github.com/thediveo/enumflag/enumflag.code-workspace b/vendor/github.com/thediveo/enumflag/enumflag.code-workspace new file mode 100644 index 0000000000..362d7c25bb --- /dev/null +++ b/vendor/github.com/thediveo/enumflag/enumflag.code-workspace @@ -0,0 +1,7 @@ +{ + "folders": [ + { + "path": "." + } + ] +} \ No newline at end of file diff --git a/vendor/github.com/thediveo/enumflag/enumflag.go b/vendor/github.com/thediveo/enumflag/enumflag.go new file mode 100644 index 0000000000..df84581e2f --- /dev/null +++ b/vendor/github.com/thediveo/enumflag/enumflag.go @@ -0,0 +1,196 @@ +// Copyright 2020 Harald Albrecht. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not +// use this file except in compliance with the License. You may obtain a copy +// of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package enumflag + +import ( + "fmt" + "reflect" + "sort" + "strings" +) + +// Flag represents a CLI (enumeration) flag which can take on only a single +// enumeration value out of a fixed set of enumeration values. 
Applications +// using the enumflag package might want to derive their enumeration flags from +// Flag, such as "type MyFoo enumflag.Flag", but they don't need to. The only +// requirement for user-defined enumeration flags is that they must be +// compatible with the Flag type. +type Flag uint + +// EnumCaseSensitivity specifies whether the textual representations of enum +// values are considered to be case sensitive, or not. +type EnumCaseSensitivity bool + +// Controls whether the textual representations for enum values are case +// sensitive, or not. +const ( + EnumCaseInsensitive EnumCaseSensitivity = false + EnumCaseSensitive EnumCaseSensitivity = true +) + +// EnumValue wraps a user-defined enum type value and implements the pflag.Value +// interface, so the user's enum type value can directly be used with the fine +// pflag drop-in package for Golang CLI flags. +type EnumValue struct { + value interface{} // enum value of a user-defined enum type. + enumtype string // name of the user-defined enum type. + names enumValueNames // enum value names. + sensitivity EnumCaseSensitivity // case sensitive or insensitive? + flagtype reflect.Type // cached enum reflection type. +} + +// New wraps a given enum variable so that it can be used as a flag Value with +// pflag.Var and pflag.VarP. The specified flag must be a pointer to a +// user-defined enum value, as otherwise the flag value cannot be managed +// (changed) later on, when a CLI user tries to set it via its corresponding CLI +// flag. +func New(flag interface{}, typename string, mapping interface{}, sensitivity EnumCaseSensitivity) *EnumValue { + // Ensure that the specified enumeration variable is of a compatible type + // and that it actually is a pointer to the enum value. + flagtype := reflect.TypeOf(flag) + if flagtype.Kind() != reflect.Ptr { + panic(fmt.Sprintf( + "New requires flag to be a pointer to an enum value in order to manage it")) + } + return newEnumValue(flag, typename, mapping, sensitivity) +} + +// newEnumValue returns a correctly set up new EnumValue. It expects the flag +// var to be either a pointer or a pointer to the slice, and the caller to have +// checked this before calling. +func newEnumValue(flag interface{}, typename string, mapping interface{}, sensitivity EnumCaseSensitivity) *EnumValue { + flagtype := reflect.TypeOf(flag).Elem() + if flagtype.Kind() == reflect.Slice { + flagtype = flagtype.Elem() + } + flagtypename := flagtype.Name() + if !flagtype.ConvertibleTo(uintType) { + panic(fmt.Sprintf("incompatible enum value type %s", flagtypename)) + } + // Next, ensure that the enumeration values (in form of textual + // representations) actually are stored in a map. + mappingrval := reflect.ValueOf(mapping) + if mappingrval.Kind() != reflect.Map || !mappingrval.Type().Key().ConvertibleTo(uintType) { + panic(fmt.Sprintf("incompatible enum values map type %s", + mappingrval.Type().Name())) + } + // Oh, the magic of Golang reflexions makes us put on our beer googles, erm, + // goggles: we now convert the specified enum values into our "canonical" + // mapping. While we can (mostly) keep the map values (which are string + // slices), we have to convert the map keys into our canonical EnumFlag key + // type (=enum values type). + enummap := enumValueNames{} + for _, key := range mappingrval.MapKeys() { + names := mappingrval.MapIndex(key).Interface().([]string) + if sensitivity == EnumCaseInsensitive { + names = append(names[:0:0], names...) 
// https://github.com/golang/go/wiki/SliceTricks + for idx, name := range names { + names[idx] = strings.ToLower(name) + } + } + enummap[key.Convert(enumFlagType).Interface().(Flag)] = names + } + // Finally return the Value-compatible wrapper, which now has the necessary + // information about the mapping and case sensitivity, as well as the other + // things. + return &EnumValue{ + value: flag, + enumtype: typename, + names: enummap, + sensitivity: sensitivity, + flagtype: flagtype, + } +} + +// Get returns the managed enum value as a convenience. +func (e *EnumValue) Get() interface{} { + return e.value +} + +// Set sets the enum flag to the specified enum value. If the specified value +// isn't a valid enum value, then the enum flag will be unchanged and an error +// returned instead. +func (e *EnumValue) Set(val string) error { + enumcode, err := e.code(val) + if err == nil { + // When creating our enum flag wrapper we made sure it has a value + // reference, so we don't need to double-check here again, but now access it + // always indirectly. + reflect.ValueOf(e.value).Elem().Set( + reflect.ValueOf(enumcode).Convert(e.flagtype)) + } + return err +} + +// code parses the textual representation of an enumeration value, returning the +// corresponding enumeration value, or an error. +func (e *EnumValue) code(val string) (Flag, error) { + if e.sensitivity == EnumCaseInsensitive { + val = strings.ToLower(val) + } + // Try to find a matching enum value textual representation, and then take + // its enumation value ("code"). + for enumval, ids := range e.names { + for _, id := range ids { + if val == id { + return enumval, nil + } + } + } + // Oh no! An invalid textual enum value was specified, so let's generate + // some useful error explaining which textual representations are valid. + // We're ordering values by their canonical names in order to achieve a + // stable error message. + allids := []string{} + for _, ids := range e.names { + s := []string{} + for _, id := range ids { + s = append(s, "'"+id+"'") + } + allids = append(allids, strings.Join(s, "/")) + } + sort.Strings(allids) + return 0, fmt.Errorf("must be %s", strings.Join(allids, ", ")) +} + +// String returns the textual representation of an enumeration (flag) value. In +// case multiple textual representations (=identifiers) exist for the same +// enumeration value, then only the first textual representation is returned, +// which is considered to be the canonical one. +func (e *EnumValue) String() string { + flagval := reflect.ValueOf(e.value).Elem() + if ids, ok := e.names[flagval.Convert(enumFlagType).Interface().(Flag)]; ok { + if len(ids) > 0 { + return ids[0] + } + } + return "" +} + +// Type returns the name of the flag value type. The type name is used in error +// message. +func (e *EnumValue) Type() string { + return e.enumtype +} + +// enumValueNames maps enumeration values to their corresponding textual +// representations. This mapping is a one-to-many mapping in that the same +// enumeration value may have more than only one associated textual +// representation. +type enumValueNames map[Flag][]string + +// Reflection types used in this package. 
+var enumFlagType = reflect.TypeOf(Flag(0)) +var uintType = reflect.TypeOf(uint(0)) diff --git a/vendor/github.com/thediveo/enumflag/enumslice.go b/vendor/github.com/thediveo/enumflag/enumslice.go new file mode 100644 index 0000000000..d08b49015f --- /dev/null +++ b/vendor/github.com/thediveo/enumflag/enumslice.go @@ -0,0 +1,94 @@ +package enumflag + +import ( + "fmt" + "reflect" + "strings" +) + +// EnumSliceValue wraps a slice of enum values for a user-defined enum type. +type EnumSliceValue struct { + *EnumValue + merge bool // replace complete slice or merge values? +} + +// NewSlice warps a given enum slice variable so that it can ve used as a flag +// Value with pflag.Var and pflag.VarP. It takes the same parameters as New, +// with the exception of expecting a slice instead of a single enum var. +func NewSlice(flag interface{}, typename string, mapping interface{}, sensitivity EnumCaseSensitivity) *EnumSliceValue { + flagtype := reflect.TypeOf(flag) + if flagtype.Kind() != reflect.Ptr || flagtype.Elem().Kind() != reflect.Slice { + panic(fmt.Sprintf( + "NewSlice requires flag to be a pointer to an enum value slice in order to manage it")) + } + return &EnumSliceValue{ + EnumValue: newEnumValue(flag, typename, mapping, sensitivity), + } +} + +// Set either sets or merges the enum slice flag: the first call will set the +// flag value to the specified set of enum values. Later calls then merge enum +// values instead of replacing the current set. This mimics the behavior of +// pflag's slice flags. +func (e *EnumSliceValue) Set(val string) error { + // First parse and convert the textual enum values into their + // program-internal codes. + vals := strings.Split(val, ",") + enums := make([]Flag, len(vals)) + for idx, enumval := range vals { + enumcode, err := e.code(enumval) + if err != nil { + return err + } + enums[idx] = enumcode + } + enumslice := reflect.ValueOf(e.value).Elem() + if !e.merge { + // Replace any existing default enum value set. For this, we need to + // convert the parsed enum codes into the format used by the + // user-defined enum slice. + size := len(enums) + v := reflect.MakeSlice(reflect.TypeOf(e.value).Elem(), size, size) + for idx, enumcode := range enums { + v.Index(idx).Set(reflect.ValueOf(enumcode).Convert(e.flagtype)) + } + enumslice.Set(v) + e.merge = true // ...and next time: merge. + } else { + // Merge in the existing enum values. + next: + for _, enumcode := range enums { + // Is this enum value (code) already part of the slice? Then skip + // it, otherwise append it. + size := enumslice.Len() + for idx := 0; idx < size; idx++ { + if enumslice.Index(idx).Convert(enumFlagType).Interface().(Flag) == enumcode { + continue next + } + } + enumslice.Set(reflect.Append( + enumslice, + reflect.ValueOf(enumcode).Convert(e.flagtype))) + } + } + return nil +} + +// String returns the textual representation of an enumeration (flag) slice, +// which can contain multiple enumeration values from the same enumeration +// simultaneously. In case multiple textual representations (=identifiers) exist +// for the same enumeration value, then only the first textual representation is +// returned, which is considered to be the canonical one. 
+func (e *EnumSliceValue) String() string { + flagvals := reflect.ValueOf(e.value).Elem() + idsl := []string{} + size := flagvals.Len() + for idx := 0; idx < size; idx++ { + if ids, ok := e.names[flagvals.Index(idx).Convert(enumFlagType).Interface().(Flag)]; ok { + if len(ids) > 0 { + idsl = append(idsl, ids[0]) + } + } + } + return "[" + strings.Join(idsl, ",") + "]" +} diff --git a/vendor/github.com/thediveo/enumflag/go.mod b/vendor/github.com/thediveo/enumflag/go.mod new file mode 100644 index 0000000000..7a5dc4eb7e --- /dev/null +++ b/vendor/github.com/thediveo/enumflag/go.mod @@ -0,0 +1,9 @@ +module github.com/thediveo/enumflag + +go 1.13 + +require ( + github.com/onsi/ginkgo v1.12.0 + github.com/onsi/gomega v1.9.0 + github.com/spf13/cobra v0.0.7 +) diff --git a/vendor/github.com/thediveo/enumflag/go.sum b/vendor/github.com/thediveo/enumflag/go.sum new file mode 100644 index 0000000000..1646949c1f --- /dev/null +++ b/vendor/github.com/thediveo/enumflag/go.sum @@ -0,0 +1,148 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= 
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/pelletier/go-toml v1.2.0/go.mod 
h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.7 h1:FfTH+vuMXOas8jmfb5/M7dzEYx7LpcLb7a0LPe34uOU= +github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092 h1:4QSRKanuywn15aTZvI/mIDEgPQpswuFndXpOj3rKEco= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwglRwMkXSBzwVbkOjLLA= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc 
v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/wavesoftware/go-ensure/.editorconfig b/vendor/github.com/wavesoftware/go-ensure/.editorconfig new file mode 100644 index 0000000000..25d2a11a99 --- /dev/null +++ b/vendor/github.com/wavesoftware/go-ensure/.editorconfig @@ -0,0 +1,15 @@ +# EditorConfig is awesome: https://EditorConfig.org + +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +charset = utf-8 +indent_style = space +indent_size = 2 + +[*.go] +indent_style = tab diff --git a/vendor/github.com/wavesoftware/go-ensure/.gitignore b/vendor/github.com/wavesoftware/go-ensure/.gitignore new file mode 100644 index 0000000000..e67b8a3c6f --- /dev/null +++ b/vendor/github.com/wavesoftware/go-ensure/.gitignore @@ -0,0 +1,17 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +vendor/ + +build/ diff --git a/vendor/github.com/wavesoftware/go-ensure/.travis.yml b/vendor/github.com/wavesoftware/go-ensure/.travis.yml new file mode 100644 index 0000000000..2f4b5c2f14 --- /dev/null +++ b/vendor/github.com/wavesoftware/go-ensure/.travis.yml @@ -0,0 +1,15 @@ +--- +language: go +sudo: false +dist: bionic +go: + - 1.13.x +script: + - make clean test +branches: + only: + - master + - develop + - "/^v\\d/" +notifications: + email: onchange diff --git a/vendor/github.com/wavesoftware/go-ensure/LICENSE b/vendor/github.com/wavesoftware/go-ensure/LICENSE new file mode 100644 index 0000000000..6c180a6f23 --- /dev/null +++ b/vendor/github.com/wavesoftware/go-ensure/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020 Wave Software + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/wavesoftware/go-ensure/Makefile b/vendor/github.com/wavesoftware/go-ensure/Makefile
new file mode 100644
index 0000000000..afa4896e3a
--- /dev/null
+++ b/vendor/github.com/wavesoftware/go-ensure/Makefile
@@ -0,0 +1,32 @@
+PROJECT_DIR = $(shell readlink -f .)
+BUILD_DIR = "$(PROJECT_DIR)/build"
+
+GO ?= go
+RICHGO ?= rich$(GO)
+
+.PHONY: default
+default: binaries
+
+.PHONY: builddeps
+builddeps:
+	@GO111MODULE=off $(GO) get github.com/kyoh86/richgo
+	@GO111MODULE=off $(GO) get github.com/mgechev/revive
+
+.PHONY: builddir
+builddir:
+	@mkdir -p build
+
+.PHONY: clean
+clean: builddeps
+	@echo "🛁 Cleaning"
+	@rm -frv $(BUILD_DIR)
+
+.PHONY: check
+check: builddeps
+	@echo "🛂 Checking"
+	revive -config revive.toml -formatter stylish ./...
+
+.PHONY: test
+test: builddir check
+	@echo "✔️ Testing"
+	$(RICHGO) test -v -covermode=count -coverprofile=build/coverage.out ./...
diff --git a/vendor/github.com/wavesoftware/go-ensure/README.md b/vendor/github.com/wavesoftware/go-ensure/README.md
new file mode 100644
index 0000000000..82e73e4525
--- /dev/null
+++ b/vendor/github.com/wavesoftware/go-ensure/README.md
@@ -0,0 +1,55 @@
+# Ensure for Go
+
+A simple ensure package for Golang
+
+## Use case
+
+Writing Go code involves a lot of repetition around error checking. That's
+especially true for end-user code like e2e tests, where we expect that something
+will work. In other cases the failure is a bug, and the code should panic.
+
+## Usage
+
+Instead of writing:
+
+```go
+func DeployAllComponents() error {
+    alpha, err := deployAlpha()
+    if err != nil {
+        return errors.WithMessage(err, "unexpected error")
+    }
+    beta, err := deployBeta(alpha)
+    if err != nil {
+        return errors.WithMessage(err, "unexpected error")
+    }
+    _, err = deployGamma(beta)
+    if err != nil {
+        return errors.WithMessage(err, "unexpected error")
+    }
+    return nil
+}
+
+// execution isn't simple
+err := DeployAllComponents()
+if err != nil {
+    panic(err)
+}
+```
+
+with this library you can write it like:
+
+```go
+func DeployAllComponents() {
+    alpha, err := deployAlpha()
+    ensure.NoError(err)
+    beta, err := deployBeta(alpha)
+    ensure.NoError(err)
+    _, err = deployGamma(beta)
+    ensure.NoError(err)
+}
+
+// execution is simple
+DeployAllComponents()
+```
+
+The code above is much more readable and pleasant to work with.
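For orientation, and not as part of the vendored patch content: a minimal sketch of how the ensure helpers vendored below in errors.go (NoError, Error, and ErrorWithMessage) might be used from test-style code. The deployFrontend helper and its error message are hypothetical stand-ins introduced only for this example.

```go
package example

import (
	"fmt"

	"github.com/wavesoftware/go-ensure"
)

// deployFrontend is a hypothetical helper that may fail.
func deployFrontend(replicas int) error {
	if replicas < 1 {
		return fmt.Errorf("invalid replica count: %d", replicas)
	}
	return nil
}

// DeployFrontend shows the three helpers together.
func DeployFrontend() {
	// Panics with a wrapped "unexpected error" message if deployment fails.
	ensure.NoError(deployFrontend(3))

	// Expecting a failure: panic if no error is returned, or if the
	// error message does not match the given regexp.
	err := deployFrontend(0)
	ensure.Error(err)
	ensure.ErrorWithMessage(err, `invalid replica count: \d+`)
}
```

As the README argues, this trades explicit error returns for panics, which is reasonable for e2e-test helpers but would not be appropriate in library code.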
diff --git a/vendor/github.com/wavesoftware/go-ensure/errors.go b/vendor/github.com/wavesoftware/go-ensure/errors.go new file mode 100644 index 0000000000..9da1e2e860 --- /dev/null +++ b/vendor/github.com/wavesoftware/go-ensure/errors.go @@ -0,0 +1,33 @@ +package ensure + +import ( + "fmt" + "github.com/pkg/errors" + "regexp" +) + +// NoError will panic if given an error, as it was unexpected +func NoError(err error) { + if err != nil { + panic(errors.WithMessage(err, "unexpected error")) + } +} + +// Error will panic if given no error, as it expected one +func Error(err error) { + if err == nil { + panic(errors.New("expecting error, but none given")) + } +} + +// ErrorWithMessage will panic if given no error, or error message don't match provided regexp +func ErrorWithMessage(err error, messageRegexp string) { + Error(err) + validErrorMessage := regexp.MustCompile(messageRegexp) + if !validErrorMessage.MatchString(err.Error()) { + panic(errors.WithMessage( + err, + fmt.Sprintf("given error doesn't match given regexp (%s)", messageRegexp), + )) + } +} diff --git a/vendor/github.com/wavesoftware/go-ensure/go.mod b/vendor/github.com/wavesoftware/go-ensure/go.mod new file mode 100644 index 0000000000..06dbff1184 --- /dev/null +++ b/vendor/github.com/wavesoftware/go-ensure/go.mod @@ -0,0 +1,5 @@ +module github.com/wavesoftware/go-ensure + +go 1.13 + +require github.com/pkg/errors v0.9.1 diff --git a/vendor/github.com/wavesoftware/go-ensure/go.sum b/vendor/github.com/wavesoftware/go-ensure/go.sum new file mode 100644 index 0000000000..7c401c3f58 --- /dev/null +++ b/vendor/github.com/wavesoftware/go-ensure/go.sum @@ -0,0 +1,2 @@ +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= diff --git a/vendor/github.com/wavesoftware/go-ensure/revive.toml b/vendor/github.com/wavesoftware/go-ensure/revive.toml new file mode 100644 index 0000000000..a4cb9ecd72 --- /dev/null +++ b/vendor/github.com/wavesoftware/go-ensure/revive.toml @@ -0,0 +1,35 @@ +# When set to false, ignores files with "GENERATED" header, similar to golint +ignoreGeneratedHeader = true + +# Sets the default severity to "warning" +severity = "warning" + +# Sets the default failure confidence. This means that linting errors +# with less than 0.8 confidence will be ignored. +confidence = 0.8 + +# Sets the error code for failures with severity "error" +errorCode = 102 + +# Sets the error code for failures with severity "warning" +warningCode = 102 + +[rule.blank-imports] +[rule.context-as-argument] +[rule.context-keys-type] +[rule.dot-imports] +[rule.error-return] +[rule.error-strings] +[rule.error-naming] +[rule.exported] +[rule.if-return] +[rule.increment-decrement] +[rule.var-naming] +[rule.var-declaration] +[rule.package-comments] +[rule.range] +[rule.receiver-naming] +[rule.time-naming] +[rule.unexported-return] +[rule.indent-error-flow] +[rule.errorf] diff --git a/vendor/golang.org/x/crypto/pkcs12/bmp-string.go b/vendor/golang.org/x/crypto/pkcs12/bmp-string.go new file mode 100644 index 0000000000..233b8b62cc --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/bmp-string.go @@ -0,0 +1,50 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "errors" + "unicode/utf16" +) + +// bmpString returns s encoded in UCS-2 with a zero terminator. 
+func bmpString(s string) ([]byte, error) { + // References: + // https://tools.ietf.org/html/rfc7292#appendix-B.1 + // https://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane + // - non-BMP characters are encoded in UTF 16 by using a surrogate pair of 16-bit codes + // EncodeRune returns 0xfffd if the rune does not need special encoding + // - the above RFC provides the info that BMPStrings are NULL terminated. + + ret := make([]byte, 0, 2*len(s)+2) + + for _, r := range s { + if t, _ := utf16.EncodeRune(r); t != 0xfffd { + return nil, errors.New("pkcs12: string contains characters that cannot be encoded in UCS-2") + } + ret = append(ret, byte(r/256), byte(r%256)) + } + + return append(ret, 0, 0), nil +} + +func decodeBMPString(bmpString []byte) (string, error) { + if len(bmpString)%2 != 0 { + return "", errors.New("pkcs12: odd-length BMP string") + } + + // strip terminator if present + if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 { + bmpString = bmpString[:l-2] + } + + s := make([]uint16, 0, len(bmpString)/2) + for len(bmpString) > 0 { + s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1])) + bmpString = bmpString[2:] + } + + return string(utf16.Decode(s)), nil +} diff --git a/vendor/golang.org/x/crypto/pkcs12/crypto.go b/vendor/golang.org/x/crypto/pkcs12/crypto.go new file mode 100644 index 0000000000..484ca51b71 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/crypto.go @@ -0,0 +1,131 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "bytes" + "crypto/cipher" + "crypto/des" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + + "golang.org/x/crypto/pkcs12/internal/rc2" +) + +var ( + oidPBEWithSHAAnd3KeyTripleDESCBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 3}) + oidPBEWithSHAAnd40BitRC2CBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 6}) +) + +// pbeCipher is an abstraction of a PKCS#12 cipher. +type pbeCipher interface { + // create returns a cipher.Block given a key. + create(key []byte) (cipher.Block, error) + // deriveKey returns a key derived from the given password and salt. + deriveKey(salt, password []byte, iterations int) []byte + // deriveKey returns an IV derived from the given password and salt. 
+ deriveIV(salt, password []byte, iterations int) []byte +} + +type shaWithTripleDESCBC struct{} + +func (shaWithTripleDESCBC) create(key []byte) (cipher.Block, error) { + return des.NewTripleDESCipher(key) +} + +func (shaWithTripleDESCBC) deriveKey(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 24) +} + +func (shaWithTripleDESCBC) deriveIV(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8) +} + +type shaWith40BitRC2CBC struct{} + +func (shaWith40BitRC2CBC) create(key []byte) (cipher.Block, error) { + return rc2.New(key, len(key)*8) +} + +func (shaWith40BitRC2CBC) deriveKey(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 5) +} + +func (shaWith40BitRC2CBC) deriveIV(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8) +} + +type pbeParams struct { + Salt []byte + Iterations int +} + +func pbDecrypterFor(algorithm pkix.AlgorithmIdentifier, password []byte) (cipher.BlockMode, int, error) { + var cipherType pbeCipher + + switch { + case algorithm.Algorithm.Equal(oidPBEWithSHAAnd3KeyTripleDESCBC): + cipherType = shaWithTripleDESCBC{} + case algorithm.Algorithm.Equal(oidPBEWithSHAAnd40BitRC2CBC): + cipherType = shaWith40BitRC2CBC{} + default: + return nil, 0, NotImplementedError("algorithm " + algorithm.Algorithm.String() + " is not supported") + } + + var params pbeParams + if err := unmarshal(algorithm.Parameters.FullBytes, ¶ms); err != nil { + return nil, 0, err + } + + key := cipherType.deriveKey(params.Salt, password, params.Iterations) + iv := cipherType.deriveIV(params.Salt, password, params.Iterations) + + block, err := cipherType.create(key) + if err != nil { + return nil, 0, err + } + + return cipher.NewCBCDecrypter(block, iv), block.BlockSize(), nil +} + +func pbDecrypt(info decryptable, password []byte) (decrypted []byte, err error) { + cbc, blockSize, err := pbDecrypterFor(info.Algorithm(), password) + if err != nil { + return nil, err + } + + encrypted := info.Data() + if len(encrypted) == 0 { + return nil, errors.New("pkcs12: empty encrypted data") + } + if len(encrypted)%blockSize != 0 { + return nil, errors.New("pkcs12: input is not a multiple of the block size") + } + decrypted = make([]byte, len(encrypted)) + cbc.CryptBlocks(decrypted, encrypted) + + psLen := int(decrypted[len(decrypted)-1]) + if psLen == 0 || psLen > blockSize { + return nil, ErrDecryption + } + + if len(decrypted) < psLen { + return nil, ErrDecryption + } + ps := decrypted[len(decrypted)-psLen:] + decrypted = decrypted[:len(decrypted)-psLen] + if bytes.Compare(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) != 0 { + return nil, ErrDecryption + } + + return +} + +// decryptable abstracts an object that contains ciphertext. +type decryptable interface { + Algorithm() pkix.AlgorithmIdentifier + Data() []byte +} diff --git a/vendor/golang.org/x/crypto/pkcs12/errors.go b/vendor/golang.org/x/crypto/pkcs12/errors.go new file mode 100644 index 0000000000..7377ce6fb2 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/errors.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import "errors" + +var ( + // ErrDecryption represents a failure to decrypt the input. 
+ ErrDecryption = errors.New("pkcs12: decryption error, incorrect padding") + + // ErrIncorrectPassword is returned when an incorrect password is detected. + // Usually, P12/PFX data is signed to be able to verify the password. + ErrIncorrectPassword = errors.New("pkcs12: decryption password incorrect") +) + +// NotImplementedError indicates that the input is not currently supported. +type NotImplementedError string + +func (e NotImplementedError) Error() string { + return "pkcs12: " + string(e) +} diff --git a/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go new file mode 100644 index 0000000000..7499e3fb69 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go @@ -0,0 +1,271 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package rc2 implements the RC2 cipher +/* +https://www.ietf.org/rfc/rfc2268.txt +http://people.csail.mit.edu/rivest/pubs/KRRR98.pdf + +This code is licensed under the MIT license. +*/ +package rc2 + +import ( + "crypto/cipher" + "encoding/binary" +) + +// The rc2 block size in bytes +const BlockSize = 8 + +type rc2Cipher struct { + k [64]uint16 +} + +// New returns a new rc2 cipher with the given key and effective key length t1 +func New(key []byte, t1 int) (cipher.Block, error) { + // TODO(dgryski): error checking for key length + return &rc2Cipher{ + k: expandKey(key, t1), + }, nil +} + +func (*rc2Cipher) BlockSize() int { return BlockSize } + +var piTable = [256]byte{ + 0xd9, 0x78, 0xf9, 0xc4, 0x19, 0xdd, 0xb5, 0xed, 0x28, 0xe9, 0xfd, 0x79, 0x4a, 0xa0, 0xd8, 0x9d, + 0xc6, 0x7e, 0x37, 0x83, 0x2b, 0x76, 0x53, 0x8e, 0x62, 0x4c, 0x64, 0x88, 0x44, 0x8b, 0xfb, 0xa2, + 0x17, 0x9a, 0x59, 0xf5, 0x87, 0xb3, 0x4f, 0x13, 0x61, 0x45, 0x6d, 0x8d, 0x09, 0x81, 0x7d, 0x32, + 0xbd, 0x8f, 0x40, 0xeb, 0x86, 0xb7, 0x7b, 0x0b, 0xf0, 0x95, 0x21, 0x22, 0x5c, 0x6b, 0x4e, 0x82, + 0x54, 0xd6, 0x65, 0x93, 0xce, 0x60, 0xb2, 0x1c, 0x73, 0x56, 0xc0, 0x14, 0xa7, 0x8c, 0xf1, 0xdc, + 0x12, 0x75, 0xca, 0x1f, 0x3b, 0xbe, 0xe4, 0xd1, 0x42, 0x3d, 0xd4, 0x30, 0xa3, 0x3c, 0xb6, 0x26, + 0x6f, 0xbf, 0x0e, 0xda, 0x46, 0x69, 0x07, 0x57, 0x27, 0xf2, 0x1d, 0x9b, 0xbc, 0x94, 0x43, 0x03, + 0xf8, 0x11, 0xc7, 0xf6, 0x90, 0xef, 0x3e, 0xe7, 0x06, 0xc3, 0xd5, 0x2f, 0xc8, 0x66, 0x1e, 0xd7, + 0x08, 0xe8, 0xea, 0xde, 0x80, 0x52, 0xee, 0xf7, 0x84, 0xaa, 0x72, 0xac, 0x35, 0x4d, 0x6a, 0x2a, + 0x96, 0x1a, 0xd2, 0x71, 0x5a, 0x15, 0x49, 0x74, 0x4b, 0x9f, 0xd0, 0x5e, 0x04, 0x18, 0xa4, 0xec, + 0xc2, 0xe0, 0x41, 0x6e, 0x0f, 0x51, 0xcb, 0xcc, 0x24, 0x91, 0xaf, 0x50, 0xa1, 0xf4, 0x70, 0x39, + 0x99, 0x7c, 0x3a, 0x85, 0x23, 0xb8, 0xb4, 0x7a, 0xfc, 0x02, 0x36, 0x5b, 0x25, 0x55, 0x97, 0x31, + 0x2d, 0x5d, 0xfa, 0x98, 0xe3, 0x8a, 0x92, 0xae, 0x05, 0xdf, 0x29, 0x10, 0x67, 0x6c, 0xba, 0xc9, + 0xd3, 0x00, 0xe6, 0xcf, 0xe1, 0x9e, 0xa8, 0x2c, 0x63, 0x16, 0x01, 0x3f, 0x58, 0xe2, 0x89, 0xa9, + 0x0d, 0x38, 0x34, 0x1b, 0xab, 0x33, 0xff, 0xb0, 0xbb, 0x48, 0x0c, 0x5f, 0xb9, 0xb1, 0xcd, 0x2e, + 0xc5, 0xf3, 0xdb, 0x47, 0xe5, 0xa5, 0x9c, 0x77, 0x0a, 0xa6, 0x20, 0x68, 0xfe, 0x7f, 0xc1, 0xad, +} + +func expandKey(key []byte, t1 int) [64]uint16 { + + l := make([]byte, 128) + copy(l, key) + + var t = len(key) + var t8 = (t1 + 7) / 8 + var tm = byte(255 % uint(1<<(8+uint(t1)-8*uint(t8)))) + + for i := len(key); i < 128; i++ { + l[i] = piTable[l[i-1]+l[uint8(i-t)]] + } + + l[128-t8] = piTable[l[128-t8]&tm] + + for i := 127 - t8; i >= 0; i-- { + l[i] = 
piTable[l[i+1]^l[i+t8]] + } + + var k [64]uint16 + + for i := range k { + k[i] = uint16(l[2*i]) + uint16(l[2*i+1])*256 + } + + return k +} + +func rotl16(x uint16, b uint) uint16 { + return (x >> (16 - b)) | (x << b) +} + +func (c *rc2Cipher) Encrypt(dst, src []byte) { + + r0 := binary.LittleEndian.Uint16(src[0:]) + r1 := binary.LittleEndian.Uint16(src[2:]) + r2 := binary.LittleEndian.Uint16(src[4:]) + r3 := binary.LittleEndian.Uint16(src[6:]) + + var j int + + for j <= 16 { + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = rotl16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = rotl16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = rotl16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = rotl16(r3, 5) + j++ + + } + + r0 = r0 + c.k[r3&63] + r1 = r1 + c.k[r0&63] + r2 = r2 + c.k[r1&63] + r3 = r3 + c.k[r2&63] + + for j <= 40 { + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = rotl16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = rotl16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = rotl16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = rotl16(r3, 5) + j++ + + } + + r0 = r0 + c.k[r3&63] + r1 = r1 + c.k[r0&63] + r2 = r2 + c.k[r1&63] + r3 = r3 + c.k[r2&63] + + for j <= 60 { + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = rotl16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = rotl16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = rotl16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = rotl16(r3, 5) + j++ + } + + binary.LittleEndian.PutUint16(dst[0:], r0) + binary.LittleEndian.PutUint16(dst[2:], r1) + binary.LittleEndian.PutUint16(dst[4:], r2) + binary.LittleEndian.PutUint16(dst[6:], r3) +} + +func (c *rc2Cipher) Decrypt(dst, src []byte) { + + r0 := binary.LittleEndian.Uint16(src[0:]) + r1 := binary.LittleEndian.Uint16(src[2:]) + r2 := binary.LittleEndian.Uint16(src[4:]) + r3 := binary.LittleEndian.Uint16(src[6:]) + + j := 63 + + for j >= 44 { + // unmix r3 + r3 = rotl16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = rotl16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = rotl16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = rotl16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + } + + r3 = r3 - c.k[r2&63] + r2 = r2 - c.k[r1&63] + r1 = r1 - c.k[r0&63] + r0 = r0 - c.k[r3&63] + + for j >= 20 { + // unmix r3 + r3 = rotl16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = rotl16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = rotl16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = rotl16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + + } + + r3 = r3 - c.k[r2&63] + r2 = r2 - c.k[r1&63] + r1 = r1 - c.k[r0&63] + r0 = r0 - c.k[r3&63] + + for j >= 0 { + // unmix r3 + r3 = rotl16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = rotl16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = rotl16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = rotl16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + + } + + 
binary.LittleEndian.PutUint16(dst[0:], r0) + binary.LittleEndian.PutUint16(dst[2:], r1) + binary.LittleEndian.PutUint16(dst[4:], r2) + binary.LittleEndian.PutUint16(dst[6:], r3) +} diff --git a/vendor/golang.org/x/crypto/pkcs12/mac.go b/vendor/golang.org/x/crypto/pkcs12/mac.go new file mode 100644 index 0000000000..5f38aa7de8 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/mac.go @@ -0,0 +1,45 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/x509/pkix" + "encoding/asn1" +) + +type macData struct { + Mac digestInfo + MacSalt []byte + Iterations int `asn1:"optional,default:1"` +} + +// from PKCS#7: +type digestInfo struct { + Algorithm pkix.AlgorithmIdentifier + Digest []byte +} + +var ( + oidSHA1 = asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}) +) + +func verifyMac(macData *macData, message, password []byte) error { + if !macData.Mac.Algorithm.Algorithm.Equal(oidSHA1) { + return NotImplementedError("unknown digest algorithm: " + macData.Mac.Algorithm.Algorithm.String()) + } + + key := pbkdf(sha1Sum, 20, 64, macData.MacSalt, password, macData.Iterations, 3, 20) + + mac := hmac.New(sha1.New, key) + mac.Write(message) + expectedMAC := mac.Sum(nil) + + if !hmac.Equal(macData.Mac.Digest, expectedMAC) { + return ErrIncorrectPassword + } + return nil +} diff --git a/vendor/golang.org/x/crypto/pkcs12/pbkdf.go b/vendor/golang.org/x/crypto/pkcs12/pbkdf.go new file mode 100644 index 0000000000..5c419d41e3 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/pbkdf.go @@ -0,0 +1,170 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "bytes" + "crypto/sha1" + "math/big" +) + +var ( + one = big.NewInt(1) +) + +// sha1Sum returns the SHA-1 hash of in. +func sha1Sum(in []byte) []byte { + sum := sha1.Sum(in) + return sum[:] +} + +// fillWithRepeats returns v*ceiling(len(pattern) / v) bytes consisting of +// repeats of pattern. +func fillWithRepeats(pattern []byte, v int) []byte { + if len(pattern) == 0 { + return nil + } + outputLen := v * ((len(pattern) + v - 1) / v) + return bytes.Repeat(pattern, (outputLen+len(pattern)-1)/len(pattern))[:outputLen] +} + +func pbkdf(hash func([]byte) []byte, u, v int, salt, password []byte, r int, ID byte, size int) (key []byte) { + // implementation of https://tools.ietf.org/html/rfc7292#appendix-B.2 , RFC text verbatim in comments + + // Let H be a hash function built around a compression function f: + + // Z_2^u x Z_2^v -> Z_2^u + + // (that is, H has a chaining variable and output of length u bits, and + // the message input to the compression function of H is v bits). The + // values for u and v are as follows: + + // HASH FUNCTION VALUE u VALUE v + // MD2, MD5 128 512 + // SHA-1 160 512 + // SHA-224 224 512 + // SHA-256 256 512 + // SHA-384 384 1024 + // SHA-512 512 1024 + // SHA-512/224 224 1024 + // SHA-512/256 256 1024 + + // Furthermore, let r be the iteration count. + + // We assume here that u and v are both multiples of 8, as are the + // lengths of the password and salt strings (which we denote by p and s, + // respectively) and the number n of pseudorandom bits required. In + // addition, u and v are of course non-zero. 
+ + // For information on security considerations for MD5 [19], see [25] and + // [1], and on those for MD2, see [18]. + + // The following procedure can be used to produce pseudorandom bits for + // a particular "purpose" that is identified by a byte called "ID". + // This standard specifies 3 different values for the ID byte: + + // 1. If ID=1, then the pseudorandom bits being produced are to be used + // as key material for performing encryption or decryption. + + // 2. If ID=2, then the pseudorandom bits being produced are to be used + // as an IV (Initial Value) for encryption or decryption. + + // 3. If ID=3, then the pseudorandom bits being produced are to be used + // as an integrity key for MACing. + + // 1. Construct a string, D (the "diversifier"), by concatenating v/8 + // copies of ID. + var D []byte + for i := 0; i < v; i++ { + D = append(D, ID) + } + + // 2. Concatenate copies of the salt together to create a string S of + // length v(ceiling(s/v)) bits (the final copy of the salt may be + // truncated to create S). Note that if the salt is the empty + // string, then so is S. + + S := fillWithRepeats(salt, v) + + // 3. Concatenate copies of the password together to create a string P + // of length v(ceiling(p/v)) bits (the final copy of the password + // may be truncated to create P). Note that if the password is the + // empty string, then so is P. + + P := fillWithRepeats(password, v) + + // 4. Set I=S||P to be the concatenation of S and P. + I := append(S, P...) + + // 5. Set c=ceiling(n/u). + c := (size + u - 1) / u + + // 6. For i=1, 2, ..., c, do the following: + A := make([]byte, c*20) + var IjBuf []byte + for i := 0; i < c; i++ { + // A. Set A2=H^r(D||I). (i.e., the r-th hash of D||1, + // H(H(H(... H(D||I)))) + Ai := hash(append(D, I...)) + for j := 1; j < r; j++ { + Ai = hash(Ai) + } + copy(A[i*20:], Ai[:]) + + if i < c-1 { // skip on last iteration + // B. Concatenate copies of Ai to create a string B of length v + // bits (the final copy of Ai may be truncated to create B). + var B []byte + for len(B) < v { + B = append(B, Ai[:]...) + } + B = B[:v] + + // C. Treating I as a concatenation I_0, I_1, ..., I_(k-1) of v-bit + // blocks, where k=ceiling(s/v)+ceiling(p/v), modify I by + // setting I_j=(I_j+B+1) mod 2^v for each j. + { + Bbi := new(big.Int).SetBytes(B) + Ij := new(big.Int) + + for j := 0; j < len(I)/v; j++ { + Ij.SetBytes(I[j*v : (j+1)*v]) + Ij.Add(Ij, Bbi) + Ij.Add(Ij, one) + Ijb := Ij.Bytes() + // We expect Ijb to be exactly v bytes, + // if it is longer or shorter we must + // adjust it accordingly. + if len(Ijb) > v { + Ijb = Ijb[len(Ijb)-v:] + } + if len(Ijb) < v { + if IjBuf == nil { + IjBuf = make([]byte, v) + } + bytesShort := v - len(Ijb) + for i := 0; i < bytesShort; i++ { + IjBuf[i] = 0 + } + copy(IjBuf[bytesShort:], Ijb) + Ijb = IjBuf + } + copy(I[j*v:(j+1)*v], Ijb) + } + } + } + } + // 7. Concatenate A_1, A_2, ..., A_c together to form a pseudorandom + // bit string, A. + + // 8. Use the first n bits of A as the output of this entire process. + return A[:size] + + // If the above process is being used to generate a DES key, the process + // should be used to create 64 random bits, and the key's parity bits + // should be set after the 64 bits have been produced. Similar concerns + // hold for 2-key and 3-key triple-DES keys, for CDMF keys, and for any + // similar keys with parity bits "built into them". 
+} diff --git a/vendor/golang.org/x/crypto/pkcs12/pkcs12.go b/vendor/golang.org/x/crypto/pkcs12/pkcs12.go new file mode 100644 index 0000000000..3a89bdb3e3 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/pkcs12.go @@ -0,0 +1,360 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pkcs12 implements some of PKCS#12. +// +// This implementation is distilled from https://tools.ietf.org/html/rfc7292 +// and referenced documents. It is intended for decoding P12/PFX-stored +// certificates and keys for use with the crypto/tls package. +// +// This package is frozen. If it's missing functionality you need, consider +// an alternative like software.sslmate.com/src/go-pkcs12. +package pkcs12 + +import ( + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/hex" + "encoding/pem" + "errors" +) + +var ( + oidDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 1}) + oidEncryptedDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 6}) + + oidFriendlyName = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 20}) + oidLocalKeyID = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 21}) + oidMicrosoftCSPName = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 4, 1, 311, 17, 1}) + + errUnknownAttributeOID = errors.New("pkcs12: unknown attribute OID") +) + +type pfxPdu struct { + Version int + AuthSafe contentInfo + MacData macData `asn1:"optional"` +} + +type contentInfo struct { + ContentType asn1.ObjectIdentifier + Content asn1.RawValue `asn1:"tag:0,explicit,optional"` +} + +type encryptedData struct { + Version int + EncryptedContentInfo encryptedContentInfo +} + +type encryptedContentInfo struct { + ContentType asn1.ObjectIdentifier + ContentEncryptionAlgorithm pkix.AlgorithmIdentifier + EncryptedContent []byte `asn1:"tag:0,optional"` +} + +func (i encryptedContentInfo) Algorithm() pkix.AlgorithmIdentifier { + return i.ContentEncryptionAlgorithm +} + +func (i encryptedContentInfo) Data() []byte { return i.EncryptedContent } + +type safeBag struct { + Id asn1.ObjectIdentifier + Value asn1.RawValue `asn1:"tag:0,explicit"` + Attributes []pkcs12Attribute `asn1:"set,optional"` +} + +type pkcs12Attribute struct { + Id asn1.ObjectIdentifier + Value asn1.RawValue `asn1:"set"` +} + +type encryptedPrivateKeyInfo struct { + AlgorithmIdentifier pkix.AlgorithmIdentifier + EncryptedData []byte +} + +func (i encryptedPrivateKeyInfo) Algorithm() pkix.AlgorithmIdentifier { + return i.AlgorithmIdentifier +} + +func (i encryptedPrivateKeyInfo) Data() []byte { + return i.EncryptedData +} + +// PEM block types +const ( + certificateType = "CERTIFICATE" + privateKeyType = "PRIVATE KEY" +) + +// unmarshal calls asn1.Unmarshal, but also returns an error if there is any +// trailing data after unmarshaling. +func unmarshal(in []byte, out interface{}) error { + trailing, err := asn1.Unmarshal(in, out) + if err != nil { + return err + } + if len(trailing) != 0 { + return errors.New("pkcs12: trailing data found") + } + return nil +} + +// ToPEM converts all "safe bags" contained in pfxData to PEM blocks. +// Unknown attributes are discarded. +// +// Note that although the returned PEM blocks for private keys have type +// "PRIVATE KEY", the bytes are not encoded according to PKCS #8, but according +// to PKCS #1 for RSA keys and SEC 1 for ECDSA keys. 
+func ToPEM(pfxData []byte, password string) ([]*pem.Block, error) { + encodedPassword, err := bmpString(password) + if err != nil { + return nil, ErrIncorrectPassword + } + + bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword) + + if err != nil { + return nil, err + } + + blocks := make([]*pem.Block, 0, len(bags)) + for _, bag := range bags { + block, err := convertBag(&bag, encodedPassword) + if err != nil { + return nil, err + } + blocks = append(blocks, block) + } + + return blocks, nil +} + +func convertBag(bag *safeBag, password []byte) (*pem.Block, error) { + block := &pem.Block{ + Headers: make(map[string]string), + } + + for _, attribute := range bag.Attributes { + k, v, err := convertAttribute(&attribute) + if err == errUnknownAttributeOID { + continue + } + if err != nil { + return nil, err + } + block.Headers[k] = v + } + + switch { + case bag.Id.Equal(oidCertBag): + block.Type = certificateType + certsData, err := decodeCertBag(bag.Value.Bytes) + if err != nil { + return nil, err + } + block.Bytes = certsData + case bag.Id.Equal(oidPKCS8ShroundedKeyBag): + block.Type = privateKeyType + + key, err := decodePkcs8ShroudedKeyBag(bag.Value.Bytes, password) + if err != nil { + return nil, err + } + + switch key := key.(type) { + case *rsa.PrivateKey: + block.Bytes = x509.MarshalPKCS1PrivateKey(key) + case *ecdsa.PrivateKey: + block.Bytes, err = x509.MarshalECPrivateKey(key) + if err != nil { + return nil, err + } + default: + return nil, errors.New("found unknown private key type in PKCS#8 wrapping") + } + default: + return nil, errors.New("don't know how to convert a safe bag of type " + bag.Id.String()) + } + return block, nil +} + +func convertAttribute(attribute *pkcs12Attribute) (key, value string, err error) { + isString := false + + switch { + case attribute.Id.Equal(oidFriendlyName): + key = "friendlyName" + isString = true + case attribute.Id.Equal(oidLocalKeyID): + key = "localKeyId" + case attribute.Id.Equal(oidMicrosoftCSPName): + // This key is chosen to match OpenSSL. + key = "Microsoft CSP Name" + isString = true + default: + return "", "", errUnknownAttributeOID + } + + if isString { + if err := unmarshal(attribute.Value.Bytes, &attribute.Value); err != nil { + return "", "", err + } + if value, err = decodeBMPString(attribute.Value.Bytes); err != nil { + return "", "", err + } + } else { + var id []byte + if err := unmarshal(attribute.Value.Bytes, &id); err != nil { + return "", "", err + } + value = hex.EncodeToString(id) + } + + return key, value, nil +} + +// Decode extracts a certificate and private key from pfxData. This function +// assumes that there is only one certificate and only one private key in the +// pfxData; if there are more use ToPEM instead. 
+func Decode(pfxData []byte, password string) (privateKey interface{}, certificate *x509.Certificate, err error) { + encodedPassword, err := bmpString(password) + if err != nil { + return nil, nil, err + } + + bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword) + if err != nil { + return nil, nil, err + } + + if len(bags) != 2 { + err = errors.New("pkcs12: expected exactly two safe bags in the PFX PDU") + return + } + + for _, bag := range bags { + switch { + case bag.Id.Equal(oidCertBag): + if certificate != nil { + err = errors.New("pkcs12: expected exactly one certificate bag") + } + + certsData, err := decodeCertBag(bag.Value.Bytes) + if err != nil { + return nil, nil, err + } + certs, err := x509.ParseCertificates(certsData) + if err != nil { + return nil, nil, err + } + if len(certs) != 1 { + err = errors.New("pkcs12: expected exactly one certificate in the certBag") + return nil, nil, err + } + certificate = certs[0] + + case bag.Id.Equal(oidPKCS8ShroundedKeyBag): + if privateKey != nil { + err = errors.New("pkcs12: expected exactly one key bag") + return nil, nil, err + } + + if privateKey, err = decodePkcs8ShroudedKeyBag(bag.Value.Bytes, encodedPassword); err != nil { + return nil, nil, err + } + } + } + + if certificate == nil { + return nil, nil, errors.New("pkcs12: certificate missing") + } + if privateKey == nil { + return nil, nil, errors.New("pkcs12: private key missing") + } + + return +} + +func getSafeContents(p12Data, password []byte) (bags []safeBag, updatedPassword []byte, err error) { + pfx := new(pfxPdu) + if err := unmarshal(p12Data, pfx); err != nil { + return nil, nil, errors.New("pkcs12: error reading P12 data: " + err.Error()) + } + + if pfx.Version != 3 { + return nil, nil, NotImplementedError("can only decode v3 PFX PDU's") + } + + if !pfx.AuthSafe.ContentType.Equal(oidDataContentType) { + return nil, nil, NotImplementedError("only password-protected PFX is implemented") + } + + // unmarshal the explicit bytes in the content for type 'data' + if err := unmarshal(pfx.AuthSafe.Content.Bytes, &pfx.AuthSafe.Content); err != nil { + return nil, nil, err + } + + if len(pfx.MacData.Mac.Algorithm.Algorithm) == 0 { + return nil, nil, errors.New("pkcs12: no MAC in data") + } + + if err := verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password); err != nil { + if err == ErrIncorrectPassword && len(password) == 2 && password[0] == 0 && password[1] == 0 { + // some implementations use an empty byte array + // for the empty string password try one more + // time with empty-empty password + password = nil + err = verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password) + } + if err != nil { + return nil, nil, err + } + } + + var authenticatedSafe []contentInfo + if err := unmarshal(pfx.AuthSafe.Content.Bytes, &authenticatedSafe); err != nil { + return nil, nil, err + } + + if len(authenticatedSafe) != 2 { + return nil, nil, NotImplementedError("expected exactly two items in the authenticated safe") + } + + for _, ci := range authenticatedSafe { + var data []byte + + switch { + case ci.ContentType.Equal(oidDataContentType): + if err := unmarshal(ci.Content.Bytes, &data); err != nil { + return nil, nil, err + } + case ci.ContentType.Equal(oidEncryptedDataContentType): + var encryptedData encryptedData + if err := unmarshal(ci.Content.Bytes, &encryptedData); err != nil { + return nil, nil, err + } + if encryptedData.Version != 0 { + return nil, nil, NotImplementedError("only version 0 of EncryptedData is supported") + } + if data, err = 
pbDecrypt(encryptedData.EncryptedContentInfo, password); err != nil { + return nil, nil, err + } + default: + return nil, nil, NotImplementedError("only data and encryptedData content types are supported in authenticated safe") + } + + var safeContents []safeBag + if err := unmarshal(data, &safeContents); err != nil { + return nil, nil, err + } + bags = append(bags, safeContents...) + } + + return bags, password, nil +} diff --git a/vendor/golang.org/x/crypto/pkcs12/safebags.go b/vendor/golang.org/x/crypto/pkcs12/safebags.go new file mode 100644 index 0000000000..def1f7b98d --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/safebags.go @@ -0,0 +1,57 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "crypto/x509" + "encoding/asn1" + "errors" +) + +var ( + // see https://tools.ietf.org/html/rfc7292#appendix-D + oidCertTypeX509Certificate = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 22, 1}) + oidPKCS8ShroundedKeyBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 2}) + oidCertBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 3}) +) + +type certBag struct { + Id asn1.ObjectIdentifier + Data []byte `asn1:"tag:0,explicit"` +} + +func decodePkcs8ShroudedKeyBag(asn1Data, password []byte) (privateKey interface{}, err error) { + pkinfo := new(encryptedPrivateKeyInfo) + if err = unmarshal(asn1Data, pkinfo); err != nil { + return nil, errors.New("pkcs12: error decoding PKCS#8 shrouded key bag: " + err.Error()) + } + + pkData, err := pbDecrypt(pkinfo, password) + if err != nil { + return nil, errors.New("pkcs12: error decrypting PKCS#8 shrouded key bag: " + err.Error()) + } + + ret := new(asn1.RawValue) + if err = unmarshal(pkData, ret); err != nil { + return nil, errors.New("pkcs12: error unmarshaling decrypted private key: " + err.Error()) + } + + if privateKey, err = x509.ParsePKCS8PrivateKey(pkData); err != nil { + return nil, errors.New("pkcs12: error parsing PKCS#8 private key: " + err.Error()) + } + + return privateKey, nil +} + +func decodeCertBag(asn1Data []byte) (x509Certificates []byte, err error) { + bag := new(certBag) + if err := unmarshal(asn1Data, bag); err != nil { + return nil, errors.New("pkcs12: error decoding cert bag: " + err.Error()) + } + if !bag.Id.Equal(oidCertTypeX509Certificate) { + return nil, NotImplementedError("only X509 certificates are supported") + } + return bag.Data, nil +} diff --git a/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go b/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go new file mode 100644 index 0000000000..2681af35af --- /dev/null +++ b/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go @@ -0,0 +1,78 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lazyregexp is a thin wrapper over regexp, allowing the use of global +// regexp variables without forcing them to be compiled at init. +package lazyregexp + +import ( + "os" + "regexp" + "strings" + "sync" +) + +// Regexp is a wrapper around regexp.Regexp, where the underlying regexp will be +// compiled the first time it is needed. 
+type Regexp struct { + str string + once sync.Once + rx *regexp.Regexp +} + +func (r *Regexp) re() *regexp.Regexp { + r.once.Do(r.build) + return r.rx +} + +func (r *Regexp) build() { + r.rx = regexp.MustCompile(r.str) + r.str = "" +} + +func (r *Regexp) FindSubmatch(s []byte) [][]byte { + return r.re().FindSubmatch(s) +} + +func (r *Regexp) FindStringSubmatch(s string) []string { + return r.re().FindStringSubmatch(s) +} + +func (r *Regexp) FindStringSubmatchIndex(s string) []int { + return r.re().FindStringSubmatchIndex(s) +} + +func (r *Regexp) ReplaceAllString(src, repl string) string { + return r.re().ReplaceAllString(src, repl) +} + +func (r *Regexp) FindString(s string) string { + return r.re().FindString(s) +} + +func (r *Regexp) FindAllString(s string, n int) []string { + return r.re().FindAllString(s, n) +} + +func (r *Regexp) MatchString(s string) bool { + return r.re().MatchString(s) +} + +func (r *Regexp) SubexpNames() []string { + return r.re().SubexpNames() +} + +var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test") + +// New creates a new lazy regexp, delaying the compiling work until it is first +// needed. If the code is being run as part of tests, the regexp compiling will +// happen immediately. +func New(str string) *Regexp { + lr := &Regexp{str: str} + if inTest { + // In tests, always compile the regexps early. + lr.re() + } + return lr +} diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go index 0e03014837..ba97ac356e 100644 --- a/vendor/golang.org/x/mod/module/module.go +++ b/vendor/golang.org/x/mod/module/module.go @@ -192,6 +192,21 @@ func (e *InvalidVersionError) Error() string { func (e *InvalidVersionError) Unwrap() error { return e.Err } +// An InvalidPathError indicates a module, import, or file path doesn't +// satisfy all naming constraints. See CheckPath, CheckImportPath, +// and CheckFilePath for specific restrictions. +type InvalidPathError struct { + Kind string // "module", "import", or "file" + Path string + Err error +} + +func (e *InvalidPathError) Error() string { + return fmt.Sprintf("malformed %s path %q: %v", e.Kind, e.Path, e.Err) +} + +func (e *InvalidPathError) Unwrap() error { return e.Err } + // Check checks that a given module path, version pair is valid. // In addition to the path being a valid module path // and the version being a valid semantic version, @@ -296,30 +311,36 @@ func fileNameOK(r rune) bool { // this second requirement is replaced by a requirement that the path // follow the gopkg.in server's conventions. // Third, no path element may begin with a dot. 
-func CheckPath(path string) error { +func CheckPath(path string) (err error) { + defer func() { + if err != nil { + err = &InvalidPathError{Kind: "module", Path: path, Err: err} + } + }() + if err := checkPath(path, modulePath); err != nil { - return fmt.Errorf("malformed module path %q: %v", path, err) + return err } i := strings.Index(path, "/") if i < 0 { i = len(path) } if i == 0 { - return fmt.Errorf("malformed module path %q: leading slash", path) + return fmt.Errorf("leading slash") } if !strings.Contains(path[:i], ".") { - return fmt.Errorf("malformed module path %q: missing dot in first path element", path) + return fmt.Errorf("missing dot in first path element") } if path[0] == '-' { - return fmt.Errorf("malformed module path %q: leading dash in first path element", path) + return fmt.Errorf("leading dash in first path element") } for _, r := range path[:i] { if !firstPathOK(r) { - return fmt.Errorf("malformed module path %q: invalid char %q in first path element", path, r) + return fmt.Errorf("invalid char %q in first path element", r) } } if _, _, ok := SplitPathVersion(path); !ok { - return fmt.Errorf("malformed module path %q: invalid version", path) + return fmt.Errorf("invalid version") } return nil } @@ -343,7 +364,7 @@ func CheckPath(path string) error { // subtleties of Unicode. func CheckImportPath(path string) error { if err := checkPath(path, importPath); err != nil { - return fmt.Errorf("malformed import path %q: %v", path, err) + return &InvalidPathError{Kind: "import", Path: path, Err: err} } return nil } @@ -358,12 +379,13 @@ const ( filePath ) -// checkPath checks that a general path is valid. -// It returns an error describing why but not mentioning path. -// Because these checks apply to both module paths and import paths, -// the caller is expected to add the "malformed ___ path %q: " prefix. -// fileName indicates whether the final element of the path is a file name -// (as opposed to a directory name). +// checkPath checks that a general path is valid. kind indicates what +// specific constraints should be applied. +// +// checkPath returns an error describing why the path is not valid. +// Because these checks apply to module, import, and file paths, +// and because other checks may be applied, the caller is expected to wrap +// this error with InvalidPathError. func checkPath(path string, kind pathKind) error { if !utf8.ValidString(path) { return fmt.Errorf("invalid UTF-8") @@ -371,7 +393,7 @@ func checkPath(path string, kind pathKind) error { if path == "" { return fmt.Errorf("empty string") } - if path[0] == '-' { + if path[0] == '-' && kind != filePath { return fmt.Errorf("leading dash") } if strings.Contains(path, "//") { @@ -477,7 +499,7 @@ func checkElem(elem string, kind pathKind) error { // subtleties of Unicode. func CheckFilePath(path string) error { if err := checkPath(path, filePath); err != nil { - return fmt.Errorf("malformed file path %q: %v", path, err) + return &InvalidPathError{Kind: "file", Path: path, Err: err} } return nil } diff --git a/vendor/golang.org/x/mod/module/pseudo.go b/vendor/golang.org/x/mod/module/pseudo.go new file mode 100644 index 0000000000..f04ad37886 --- /dev/null +++ b/vendor/golang.org/x/mod/module/pseudo.go @@ -0,0 +1,250 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Pseudo-versions +// +// Code authors are expected to tag the revisions they want users to use, +// including prereleases. 
However, not all authors tag versions at all, +// and not all commits a user might want to try will have tags. +// A pseudo-version is a version with a special form that allows us to +// address an untagged commit and order that version with respect to +// other versions we might encounter. +// +// A pseudo-version takes one of the general forms: +// +// (1) vX.0.0-yyyymmddhhmmss-abcdef123456 +// (2) vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456 +// (3) vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456+incompatible +// (4) vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456 +// (5) vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456+incompatible +// +// If there is no recently tagged version with the right major version vX, +// then form (1) is used, creating a space of pseudo-versions at the bottom +// of the vX version range, less than any tagged version, including the unlikely v0.0.0. +// +// If the most recent tagged version before the target commit is vX.Y.Z or vX.Y.Z+incompatible, +// then the pseudo-version uses form (2) or (3), making it a prerelease for the next +// possible semantic version after vX.Y.Z. The leading 0 segment in the prerelease string +// ensures that the pseudo-version compares less than possible future explicit prereleases +// like vX.Y.(Z+1)-rc1 or vX.Y.(Z+1)-1. +// +// If the most recent tagged version before the target commit is vX.Y.Z-pre or vX.Y.Z-pre+incompatible, +// then the pseudo-version uses form (4) or (5), making it a slightly later prerelease. + +package module + +import ( + "errors" + "fmt" + "strings" + "time" + + "golang.org/x/mod/internal/lazyregexp" + "golang.org/x/mod/semver" +) + +var pseudoVersionRE = lazyregexp.New(`^v[0-9]+\.(0\.0-|\d+\.\d+-([^+]*\.)?0\.)\d{14}-[A-Za-z0-9]+(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$`) + +const PseudoVersionTimestampFormat = "20060102150405" + +// PseudoVersion returns a pseudo-version for the given major version ("v1") +// preexisting older tagged version ("" or "v1.2.3" or "v1.2.3-pre"), revision time, +// and revision identifier (usually a 12-byte commit hash prefix). +func PseudoVersion(major, older string, t time.Time, rev string) string { + if major == "" { + major = "v0" + } + segment := fmt.Sprintf("%s-%s", t.UTC().Format(PseudoVersionTimestampFormat), rev) + build := semver.Build(older) + older = semver.Canonical(older) + if older == "" { + return major + ".0.0-" + segment // form (1) + } + if semver.Prerelease(older) != "" { + return older + ".0." + segment + build // form (4), (5) + } + + // Form (2), (3). + // Extract patch from vMAJOR.MINOR.PATCH + i := strings.LastIndex(older, ".") + 1 + v, patch := older[:i], older[i:] + + // Reassemble. + return v + incDecimal(patch) + "-0." + segment + build +} + +// ZeroPseudoVersion returns a pseudo-version with a zero timestamp and +// revision, which may be used as a placeholder. +func ZeroPseudoVersion(major string) string { + return PseudoVersion(major, "", time.Time{}, "000000000000") +} + +// incDecimal returns the decimal string incremented by 1. +func incDecimal(decimal string) string { + // Scan right to left turning 9s to 0s until you find a digit to increment. + digits := []byte(decimal) + i := len(digits) - 1 + for ; i >= 0 && digits[i] == '9'; i-- { + digits[i] = '0' + } + if i >= 0 { + digits[i]++ + } else { + // digits is all zeros + digits[0] = '1' + digits = append(digits, '0') + } + return string(digits) +} + +// decDecimal returns the decimal string decremented by 1, or the empty string +// if the decimal is all zeroes. 
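// Illustrative sketch (assuming the PseudoVersion helper above; the commit
// hash and timestamp are placeholders): deriving a form (2) pseudo-version
// when the most recent tag before the target commit is v1.2.3.
package example

import (
	"fmt"
	"time"

	"golang.org/x/mod/module"
)

func demo() {
	t := time.Date(2022, 2, 23, 15, 43, 18, 0, time.UTC)
	v := module.PseudoVersion("v1", "v1.2.3", t, "abcdef123456")
	fmt.Println(v) // v1.2.4-0.20220223154318-abcdef123456
}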
+func decDecimal(decimal string) string { + // Scan right to left turning 0s to 9s until you find a digit to decrement. + digits := []byte(decimal) + i := len(digits) - 1 + for ; i >= 0 && digits[i] == '0'; i-- { + digits[i] = '9' + } + if i < 0 { + // decimal is all zeros + return "" + } + if i == 0 && digits[i] == '1' && len(digits) > 1 { + digits = digits[1:] + } else { + digits[i]-- + } + return string(digits) +} + +// IsPseudoVersion reports whether v is a pseudo-version. +func IsPseudoVersion(v string) bool { + return strings.Count(v, "-") >= 2 && semver.IsValid(v) && pseudoVersionRE.MatchString(v) +} + +// IsZeroPseudoVersion returns whether v is a pseudo-version with a zero base, +// timestamp, and revision, as returned by ZeroPseudoVersion. +func IsZeroPseudoVersion(v string) bool { + return v == ZeroPseudoVersion(semver.Major(v)) +} + +// PseudoVersionTime returns the time stamp of the pseudo-version v. +// It returns an error if v is not a pseudo-version or if the time stamp +// embedded in the pseudo-version is not a valid time. +func PseudoVersionTime(v string) (time.Time, error) { + _, timestamp, _, _, err := parsePseudoVersion(v) + if err != nil { + return time.Time{}, err + } + t, err := time.Parse("20060102150405", timestamp) + if err != nil { + return time.Time{}, &InvalidVersionError{ + Version: v, + Pseudo: true, + Err: fmt.Errorf("malformed time %q", timestamp), + } + } + return t, nil +} + +// PseudoVersionRev returns the revision identifier of the pseudo-version v. +// It returns an error if v is not a pseudo-version. +func PseudoVersionRev(v string) (rev string, err error) { + _, _, rev, _, err = parsePseudoVersion(v) + return +} + +// PseudoVersionBase returns the canonical parent version, if any, upon which +// the pseudo-version v is based. +// +// If v has no parent version (that is, if it is "vX.0.0-[…]"), +// PseudoVersionBase returns the empty string and a nil error. +func PseudoVersionBase(v string) (string, error) { + base, _, _, build, err := parsePseudoVersion(v) + if err != nil { + return "", err + } + + switch pre := semver.Prerelease(base); pre { + case "": + // vX.0.0-yyyymmddhhmmss-abcdef123456 → "" + if build != "" { + // Pseudo-versions of the form vX.0.0-yyyymmddhhmmss-abcdef123456+incompatible + // are nonsensical: the "vX.0.0-" prefix implies that there is no parent tag, + // but the "+incompatible" suffix implies that the major version of + // the parent tag is not compatible with the module's import path. + // + // There are a few such entries in the index generated by proxy.golang.org, + // but we believe those entries were generated by the proxy itself. + return "", &InvalidVersionError{ + Version: v, + Pseudo: true, + Err: fmt.Errorf("lacks base version, but has build metadata %q", build), + } + } + return "", nil + + case "-0": + // vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456 → vX.Y.Z + // vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456+incompatible → vX.Y.Z+incompatible + base = strings.TrimSuffix(base, pre) + i := strings.LastIndexByte(base, '.') + if i < 0 { + panic("base from parsePseudoVersion missing patch number: " + base) + } + patch := decDecimal(base[i+1:]) + if patch == "" { + // vX.0.0-0 is invalid, but has been observed in the wild in the index + // generated by requests to proxy.golang.org. + // + // NOTE(bcmills): I cannot find a historical bug that accounts for + // pseudo-versions of this form, nor have I seen such versions in any + // actual go.mod files. 
If we find actual examples of this form and a + // reasonable theory of how they came into existence, it seems fine to + // treat them as equivalent to vX.0.0 (especially since the invalid + // pseudo-versions have lower precedence than the real ones). For now, we + // reject them. + return "", &InvalidVersionError{ + Version: v, + Pseudo: true, + Err: fmt.Errorf("version before %s would have negative patch number", base), + } + } + return base[:i+1] + patch + build, nil + + default: + // vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456 → vX.Y.Z-pre + // vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456+incompatible → vX.Y.Z-pre+incompatible + if !strings.HasSuffix(base, ".0") { + panic(`base from parsePseudoVersion missing ".0" before date: ` + base) + } + return strings.TrimSuffix(base, ".0") + build, nil + } +} + +var errPseudoSyntax = errors.New("syntax error") + +func parsePseudoVersion(v string) (base, timestamp, rev, build string, err error) { + if !IsPseudoVersion(v) { + return "", "", "", "", &InvalidVersionError{ + Version: v, + Pseudo: true, + Err: errPseudoSyntax, + } + } + build = semver.Build(v) + v = strings.TrimSuffix(v, build) + j := strings.LastIndex(v, "-") + v, rev = v[:j], v[j+1:] + i := strings.LastIndex(v, "-") + if j := strings.LastIndex(v, "."); j > i { + base = v[:j] // "vX.Y.Z-pre.0" or "vX.Y.(Z+1)-0" + timestamp = v[j+1:] + } else { + base = v[:i] // "vX.0.0" + timestamp = v[i+1:] + } + return base, timestamp, rev, build, nil +} diff --git a/vendor/golang.org/x/mod/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go index 4338f35177..7be398f80d 100644 --- a/vendor/golang.org/x/mod/semver/semver.go +++ b/vendor/golang.org/x/mod/semver/semver.go @@ -22,6 +22,8 @@ // as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0. package semver +import "sort" + // parsed returns the parsed form of a semantic version string. type parsed struct { major string @@ -150,6 +152,24 @@ func Max(v, w string) string { return w } +// ByVersion implements sort.Interface for sorting semantic version strings. +type ByVersion []string + +func (vs ByVersion) Len() int { return len(vs) } +func (vs ByVersion) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } +func (vs ByVersion) Less(i, j int) bool { + cmp := Compare(vs[i], vs[j]) + if cmp != 0 { + return cmp < 0 + } + return vs[i] < vs[j] +} + +// Sort sorts a list of semantic version strings using ByVersion. 
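// Illustrative sketch (assuming the ByVersion/Sort helpers above): sorting a
// mixed list of tags into semantic-version order, with a prerelease ordered
// before its release and v1.10.0 after v1.2.0.
package example

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func demo() {
	tags := []string{"v1.10.0", "v1.2.0", "v1.2.0-rc.1"}
	semver.Sort(tags)
	fmt.Println(tags) // [v1.2.0-rc.1 v1.2.0 v1.10.0]
}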
+func Sort(list []string) { + sort.Sort(ByVersion(list)) +} + func parse(v string) (p parsed, ok bool) { if v == "" || v[0] != 'v' { p.err = "missing v prefix" diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index faeea16a44..86b734aec2 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -1539,11 +1539,19 @@ func (cs *clientStream) writeRequestBody(req *http.Request) (err error) { return err } } - if err == io.EOF { - sawEOF = true - err = nil - } else if err != nil { - return err + if err != nil { + cc.mu.Lock() + bodyClosed := cs.reqBodyClosed + cc.mu.Unlock() + switch { + case bodyClosed: + return errStopReqBodyWrite + case err == io.EOF: + sawEOF = true + err = nil + default: + return err + } } remain := buf[:n] diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go index 41ced10acd..ccc23ee0ae 100644 --- a/vendor/golang.org/x/oauth2/google/google.go +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -92,9 +92,10 @@ func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { // JSON key file types. const ( - serviceAccountKey = "service_account" - userCredentialsKey = "authorized_user" - externalAccountKey = "external_account" + serviceAccountKey = "service_account" + userCredentialsKey = "authorized_user" + externalAccountKey = "external_account" + impersonatedServiceAccount = "impersonated_service_account" ) // credentialsFile is the unmarshalled representation of a credentials file. @@ -121,9 +122,13 @@ type credentialsFile struct { TokenURLExternal string `json:"token_url"` TokenInfoURL string `json:"token_info_url"` ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"` + Delegates []string `json:"delegates"` CredentialSource externalaccount.CredentialSource `json:"credential_source"` QuotaProjectID string `json:"quota_project_id"` WorkforcePoolUserProject string `json:"workforce_pool_user_project"` + + // Service account impersonation + SourceCredentials *credentialsFile `json:"source_credentials"` } func (f *credentialsFile) jwtConfig(scopes []string, subject string) *jwt.Config { @@ -180,6 +185,23 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar WorkforcePoolUserProject: f.WorkforcePoolUserProject, } return cfg.TokenSource(ctx) + case impersonatedServiceAccount: + if f.ServiceAccountImpersonationURL == "" || f.SourceCredentials == nil { + return nil, errors.New("missing 'source_credentials' field or 'service_account_impersonation_url' in credentials") + } + + ts, err := f.SourceCredentials.tokenSource(ctx, params) + if err != nil { + return nil, err + } + imp := externalaccount.ImpersonateTokenSource{ + Ctx: ctx, + URL: f.ServiceAccountImpersonationURL, + Scopes: params.Scopes, + Ts: ts, + Delegates: f.Delegates, + } + return oauth2.ReuseTokenSource(nil, imp), nil case "": return nil, errors.New("missing 'type' field in credentials") default: diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go index a1e36c0c70..bc3ce53172 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go @@ -140,11 +140,11 @@ func (c *Config) tokenSource(ctx context.Context, tokenURLValidPats []*regexp.Re } scopes := c.Scopes 
ts.conf.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"} - imp := impersonateTokenSource{ - ctx: ctx, - url: c.ServiceAccountImpersonationURL, - scopes: scopes, - ts: oauth2.ReuseTokenSource(nil, ts), + imp := ImpersonateTokenSource{ + Ctx: ctx, + URL: c.ServiceAccountImpersonationURL, + Scopes: scopes, + Ts: oauth2.ReuseTokenSource(nil, ts), } return oauth2.ReuseTokenSource(nil, imp), nil } diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go index 64edb56001..8251fc85e0 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go @@ -29,30 +29,44 @@ type impersonateTokenResponse struct { ExpireTime string `json:"expireTime"` } -type impersonateTokenSource struct { - ctx context.Context - ts oauth2.TokenSource +// ImpersonateTokenSource uses a source credential, stored in Ts, to request an access token to the provided URL. +// Scopes can be defined when the access token is requested. +type ImpersonateTokenSource struct { + // Ctx is the execution context of the impersonation process + // used to perform http call to the URL. Required + Ctx context.Context + // Ts is the source credential used to generate a token on the + // impersonated service account. Required. + Ts oauth2.TokenSource - url string - scopes []string + // URL is the endpoint to call to generate a token + // on behalf the service account. Required. + URL string + // Scopes that the impersonated credential should have. Required. + Scopes []string + // Delegates are the service account email addresses in a delegation chain. + // Each service account must be granted roles/iam.serviceAccountTokenCreator + // on the next service account in the chain. Optional. + Delegates []string } // Token performs the exchange to get a temporary service account token to allow access to GCP. -func (its impersonateTokenSource) Token() (*oauth2.Token, error) { +func (its ImpersonateTokenSource) Token() (*oauth2.Token, error) { reqBody := generateAccessTokenReq{ - Lifetime: "3600s", - Scope: its.scopes, + Lifetime: "3600s", + Scope: its.Scopes, + Delegates: its.Delegates, } b, err := json.Marshal(reqBody) if err != nil { return nil, fmt.Errorf("oauth2/google: unable to marshal request: %v", err) } - client := oauth2.NewClient(its.ctx, its.ts) - req, err := http.NewRequest("POST", its.url, bytes.NewReader(b)) + client := oauth2.NewClient(its.Ctx, its.Ts) + req, err := http.NewRequest("POST", its.URL, bytes.NewReader(b)) if err != nil { return nil, fmt.Errorf("oauth2/google: unable to create impersonation request: %v", err) } - req = req.WithContext(its.ctx) + req = req.WithContext(its.Ctx) req.Header.Set("Content-Type", "application/json") resp, err := client.Do(req) diff --git a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go index 87ae9d2a33..c9b69937a0 100644 --- a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go +++ b/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
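// Illustrative sketch (assuming the new "impersonated_service_account"
// credentials type handled above; the URL, service-account e-mail and
// source-credential fields are placeholders): loading such a file through
// CredentialsFromJSON.
package example

import (
	"context"

	"golang.org/x/oauth2/google"
)

var impersonatedJSON = []byte(`{
  "type": "impersonated_service_account",
  "service_account_impersonation_url": "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/target@example.iam.gserviceaccount.com:generateAccessToken",
  "delegates": [],
  "source_credentials": {"type": "authorized_user", "client_id": "...", "client_secret": "...", "refresh_token": "..."}
}`)

func load(ctx context.Context) (*google.Credentials, error) {
	return google.CredentialsFromJSON(ctx, impersonatedJSON,
		"https://www.googleapis.com/auth/cloud-platform")
}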
+//go:build go1.5 // +build go1.5 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/pwd_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_plan9.go index c07c798bc5..98bf56b732 100644 --- a/vendor/golang.org/x/sys/plan9/pwd_plan9.go +++ b/vendor/golang.org/x/sys/plan9/pwd_plan9.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.5 // +build !go1.5 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/race.go b/vendor/golang.org/x/sys/plan9/race.go index 42edd93ef9..62377d2ff9 100644 --- a/vendor/golang.org/x/sys/plan9/race.go +++ b/vendor/golang.org/x/sys/plan9/race.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build plan9 && race // +build plan9,race package plan9 diff --git a/vendor/golang.org/x/sys/plan9/race0.go b/vendor/golang.org/x/sys/plan9/race0.go index c89cf8fc0d..f8da30876d 100644 --- a/vendor/golang.org/x/sys/plan9/race0.go +++ b/vendor/golang.org/x/sys/plan9/race0.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build plan9 && !race // +build plan9,!race package plan9 diff --git a/vendor/golang.org/x/sys/plan9/str.go b/vendor/golang.org/x/sys/plan9/str.go index 4f7f9ad7c8..55fa8d025e 100644 --- a/vendor/golang.org/x/sys/plan9/str.go +++ b/vendor/golang.org/x/sys/plan9/str.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build plan9 // +build plan9 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/syscall.go b/vendor/golang.org/x/sys/plan9/syscall.go index e7363a2f54..602473cba3 100644 --- a/vendor/golang.org/x/sys/plan9/syscall.go +++ b/vendor/golang.org/x/sys/plan9/syscall.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build plan9 // +build plan9 // Package plan9 contains an interface to the low-level operating system diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go index 6819bc2094..3f40b9bd74 100644 --- a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go +++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -plan9 -tags plan9,386 syscall_plan9.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build plan9 && 386 // +build plan9,386 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go index 418abbbfc7..0e6a96aa4f 100644 --- a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go +++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -plan9 -tags plan9,amd64 syscall_plan9.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build plan9 && amd64 // +build plan9,amd64 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go index 3e8a1a58ca..244c501b77 100644 --- a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go +++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -plan9 -tags plan9,arm syscall_plan9.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build plan9 && arm // +build plan9,arm package plan9 diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md index 474efad0e0..7d3c060e12 100644 --- a/vendor/golang.org/x/sys/unix/README.md +++ b/vendor/golang.org/x/sys/unix/README.md @@ -149,7 +149,7 @@ To add a constant, add the header that includes it to the appropriate variable. Then, edit the regex (if necessary) to match the desired constant. Avoid making the regex too broad to avoid matching unintended constants. -### mkmerge.go +### internal/mkmerge This program is used to extract duplicate const, func, and type declarations from the generated architecture-specific files listed below, and merge these diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index 396aadf86d..ee73623489 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -50,7 +50,7 @@ if [[ "$GOOS" = "linux" ]]; then # Use the Docker-based build system # Files generated through docker (use $cmd so you can Ctl-C the build or run) $cmd docker build --tag generate:$GOOS $GOOS - $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")" && /bin/pwd):/build generate:$GOOS + $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && /bin/pwd):/build generate:$GOOS exit fi diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 850aafec1e..4945739ea0 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -54,7 +54,7 @@ includes_AIX=' includes_Darwin=' #define _DARWIN_C_SOURCE -#define KERNEL +#define KERNEL 1 #define _DARWIN_USE_64_BIT_INODE #define __APPLE_USE_RFC_3542 #include @@ -75,6 +75,7 @@ includes_Darwin=' #include #include #include +#include #include #include #include @@ -82,6 +83,9 @@ includes_Darwin=' #include #include #include + +// for backwards compatibility because moved TIOCREMOTE to Kernel.framework after MacOSX12.0.sdk. +#define TIOCREMOTE 0x80047469 ' includes_DragonFly=' @@ -235,6 +239,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -466,7 +471,6 @@ ccflags="$@" $2 !~ /^EQUIV_/ && $2 !~ /^EXPR_/ && $2 !~ /^EVIOC/ && - $2 !~ /^EV_/ && $2 ~ /^E[A-Z0-9_]+$/ || $2 ~ /^B[0-9_]+$/ || $2 ~ /^(OLD|NEW)DEV$/ || @@ -517,7 +521,7 @@ ccflags="$@" $2 ~ /^HW_MACHINE$/ || $2 ~ /^SYSCTL_VERS/ || $2 !~ "MNT_BITS" && - $2 ~ /^(MS|MNT|UMOUNT)_/ || + $2 ~ /^(MS|MNT|MOUNT|UMOUNT)_/ || $2 ~ /^NS_GET_/ || $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ || $2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT|TFD)_/ || diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_linux.go b/vendor/golang.org/x/sys/unix/sockcmsg_linux.go index 8bf4570594..5f63147e06 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_linux.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_linux.go @@ -34,3 +34,52 @@ func ParseUnixCredentials(m *SocketControlMessage) (*Ucred, error) { ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0])) return &ucred, nil } + +// PktInfo4 encodes Inet4Pktinfo into a socket control message of type IP_PKTINFO. +func PktInfo4(info *Inet4Pktinfo) []byte { + b := make([]byte, CmsgSpace(SizeofInet4Pktinfo)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_IP + h.Type = IP_PKTINFO + h.SetLen(CmsgLen(SizeofInet4Pktinfo)) + *(*Inet4Pktinfo)(h.data(0)) = *info + return b +} + +// PktInfo6 encodes Inet6Pktinfo into a socket control message of type IPV6_PKTINFO. 
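// Illustrative sketch (assuming the new PktInfo4 helper above plus the
// package's existing Sendmsg and Inet4Pktinfo; fd, payload and dst are
// placeholders): pinning the egress interface of a UDP send on Linux with
// IP_PKTINFO ancillary data.
package example

import "golang.org/x/sys/unix"

func sendVia(fd, ifindex int, payload []byte, dst unix.Sockaddr) error {
	oob := unix.PktInfo4(&unix.Inet4Pktinfo{Ifindex: int32(ifindex)})
	return unix.Sendmsg(fd, payload, oob, dst, 0)
}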
+func PktInfo6(info *Inet6Pktinfo) []byte { + b := make([]byte, CmsgSpace(SizeofInet6Pktinfo)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_IPV6 + h.Type = IPV6_PKTINFO + h.SetLen(CmsgLen(SizeofInet6Pktinfo)) + *(*Inet6Pktinfo)(h.data(0)) = *info + return b +} + +// ParseOrigDstAddr decodes a socket control message containing the original +// destination address. To receive such a message the IP_RECVORIGDSTADDR or +// IPV6_RECVORIGDSTADDR option must be enabled on the socket. +func ParseOrigDstAddr(m *SocketControlMessage) (Sockaddr, error) { + switch { + case m.Header.Level == SOL_IP && m.Header.Type == IP_ORIGDSTADDR: + pp := (*RawSockaddrInet4)(unsafe.Pointer(&m.Data[0])) + sa := new(SockaddrInet4) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + sa.Addr = pp.Addr + return sa, nil + + case m.Header.Level == SOL_IPV6 && m.Header.Type == IPV6_ORIGDSTADDR: + pp := (*RawSockaddrInet6)(unsafe.Pointer(&m.Data[0])) + sa := new(SockaddrInet6) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + sa.ZoneId = pp.Scope_id + sa.Addr = pp.Addr + return sa, nil + + default: + return nil, EINVAL + } +} diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index d8efb715ff..6192750ce3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -70,9 +70,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil } @@ -85,9 +83,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil } @@ -261,9 +257,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil case AF_INET6: @@ -272,9 +266,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil } return nil, EAFNOSUPPORT @@ -385,6 +377,11 @@ func (w WaitStatus) TrapCause() int { return -1 } //sys fcntl(fd int, cmd int, arg int) (val int, err error) +//sys fsyncRange(fd int, how int, start int64, length int64) (err error) = fsync_range +func Fsync(fd int) error { + return fsyncRange(fd, O_SYNC, 0, 0) +} + /* * Direct access */ @@ -401,7 +398,6 @@ func (w WaitStatus) TrapCause() int { return -1 } //sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) //sys Fdatasync(fd int) (err error) -//sys Fsync(fd int) (err error) // readdir_r //sysnb Getpgid(pid int) (pgid int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 95ac3946b5..0ce4523261 100644 --- 
a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -163,9 +163,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -179,9 +177,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -210,9 +206,7 @@ func (sa *SockaddrDatalink) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Nlen = sa.Nlen sa.raw.Alen = sa.Alen sa.raw.Slen = sa.Slen - for i := 0; i < len(sa.raw.Data); i++ { - sa.raw.Data[i] = sa.Data[i] - } + sa.raw.Data = sa.Data return unsafe.Pointer(&sa.raw), SizeofSockaddrDatalink, nil } @@ -228,9 +222,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { sa.Nlen = pp.Nlen sa.Alen = pp.Alen sa.Slen = pp.Slen - for i := 0; i < len(sa.Data); i++ { - sa.Data[i] = pp.Data[i] - } + sa.Data = pp.Data return sa, nil case AF_UNIX: @@ -262,9 +254,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil case AF_INET6: @@ -273,9 +263,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil } return anyToSockaddrGOOS(fd, rsa) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 23f6b57606..8826f41435 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -48,6 +48,30 @@ func (sa *SockaddrCtl) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), SizeofSockaddrCtl, nil } +// SockaddrVM implements the Sockaddr interface for AF_VSOCK type sockets. +// SockaddrVM provides access to Darwin VM sockets: a mechanism that enables +// bidirectional communication between a hypervisor and its guest virtual +// machines. +type SockaddrVM struct { + // CID and Port specify a context ID and port address for a VM socket. + // Guests have a unique CID, and hosts may have a well-known CID of: + // - VMADDR_CID_HYPERVISOR: refers to the hypervisor process. + // - VMADDR_CID_LOCAL: refers to local communication (loopback). + // - VMADDR_CID_HOST: refers to other processes on the host. 
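// Illustrative sketch (assuming the SockaddrVM type above and the AF_VSOCK /
// VMADDR_CID_HOST constants named in its documentation; the port is a
// placeholder): dialing a host-side vsock listener from a Darwin guest.
package example

import "golang.org/x/sys/unix"

func dialHost(port uint32) (int, error) {
	fd, err := unix.Socket(unix.AF_VSOCK, unix.SOCK_STREAM, 0)
	if err != nil {
		return -1, err
	}
	if err := unix.Connect(fd, &unix.SockaddrVM{CID: unix.VMADDR_CID_HOST, Port: port}); err != nil {
		unix.Close(fd)
		return -1, err
	}
	return fd, nil
}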
+ CID uint32 + Port uint32 + raw RawSockaddrVM +} + +func (sa *SockaddrVM) sockaddr() (unsafe.Pointer, _Socklen, error) { + sa.raw.Len = SizeofSockaddrVM + sa.raw.Family = AF_VSOCK + sa.raw.Port = sa.Port + sa.raw.Cid = sa.CID + + return unsafe.Pointer(&sa.raw), SizeofSockaddrVM, nil +} + func anyToSockaddrGOOS(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { switch rsa.Addr.Family { case AF_SYSTEM: @@ -58,6 +82,13 @@ func anyToSockaddrGOOS(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { sa.Unit = pp.Sc_unit return sa, nil } + case AF_VSOCK: + pp := (*RawSockaddrVM)(unsafe.Pointer(rsa)) + sa := &SockaddrVM{ + CID: pp.Cid, + Port: pp.Port, + } + return sa, nil } return nil, EAFNOSUPPORT } @@ -399,8 +430,25 @@ func GetsockoptXucred(fd, level, opt int) (*Xucred, error) { return x, err } -func SysctlKinfoProcSlice(name string) ([]KinfoProc, error) { - mib, err := sysctlmib(name) +func SysctlKinfoProc(name string, args ...int) (*KinfoProc, error) { + mib, err := sysctlmib(name, args...) + if err != nil { + return nil, err + } + + var kinfo KinfoProc + n := uintptr(SizeofKinfoProc) + if err := sysctl(mib, (*byte)(unsafe.Pointer(&kinfo)), &n, nil, 0); err != nil { + return nil, err + } + if n != SizeofKinfoProc { + return nil, EIO + } + return &kinfo, nil +} + +func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { + mib, err := sysctlmib(name, args...) if err != nil { return nil, err } @@ -433,6 +481,11 @@ func SysctlKinfoProcSlice(name string) ([]KinfoProc, error) { //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) +//sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) +//sys shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) +//sys shmdt(addr uintptr) (err error) +//sys shmget(key int, size int, flag int) (id int, err error) + /* * Exposed directly */ @@ -590,10 +643,6 @@ func SysctlKinfoProcSlice(name string) ([]KinfoProc, error) { // Msgget // Msgsnd // Msgrcv -// Shmat -// Shmctl -// Shmdt -// Shmget // Shm_open // Shm_unlink // Sem_open diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index a8ae8a570b..4bc5baf77d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -13,7 +13,6 @@ package unix import ( "encoding/binary" - "runtime" "syscall" "unsafe" ) @@ -146,6 +145,15 @@ func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error return ppoll(&fds[0], len(fds), timeout, sigmask) } +func Poll(fds []PollFd, timeout int) (n int, err error) { + var ts *Timespec + if timeout >= 0 { + ts = new(Timespec) + *ts = NsecToTimespec(int64(timeout) * 1e6) + } + return Ppoll(fds, ts, nil) +} + //sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error) func Readlink(path string, buf []byte) (n int, err error) { @@ -364,9 +372,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil } @@ -379,9 +385,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), 
SizeofSockaddrInet6, nil } @@ -430,9 +434,7 @@ func (sa *SockaddrLinklayer) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Hatype = sa.Hatype sa.raw.Pkttype = sa.Pkttype sa.raw.Halen = sa.Halen - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrLinklayer, nil } @@ -847,12 +849,10 @@ func (sa *SockaddrTIPC) sockaddr() (unsafe.Pointer, _Socklen, error) { if sa.Addr == nil { return nil, 0, EINVAL } - sa.raw.Family = AF_TIPC sa.raw.Scope = int8(sa.Scope) sa.raw.Addrtype = sa.Addr.tipcAddrtype() sa.raw.Addr = sa.Addr.tipcAddr() - return unsafe.Pointer(&sa.raw), SizeofSockaddrTIPC, nil } @@ -866,9 +866,7 @@ type SockaddrL2TPIP struct { func (sa *SockaddrL2TPIP) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Family = AF_INET sa.raw.Conn_id = sa.ConnId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrL2TPIP, nil } @@ -884,9 +882,7 @@ func (sa *SockaddrL2TPIP6) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Family = AF_INET6 sa.raw.Conn_id = sa.ConnId sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrL2TPIP6, nil } @@ -982,9 +978,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { sa.Hatype = pp.Hatype sa.Pkttype = pp.Pkttype sa.Halen = pp.Halen - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil case AF_UNIX: @@ -1023,18 +1017,14 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { pp := (*RawSockaddrL2TPIP)(unsafe.Pointer(rsa)) sa := new(SockaddrL2TPIP) sa.ConnId = pp.Conn_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil default: pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil } @@ -1050,9 +1040,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { sa := new(SockaddrL2TPIP6) sa.ConnId = pp.Conn_id sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil default: pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa)) @@ -1060,9 +1048,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil } @@ -1789,6 +1775,16 @@ func Mount(source string, target string, fstype string, flags uintptr, data stri return mount(source, target, fstype, flags, datap) } +//sys mountSetattr(dirfd int, pathname string, flags uint, attr *MountAttr, size uintptr) (err error) = SYS_MOUNT_SETATTR + +// MountSetattr is a wrapper for mount_setattr(2). +// https://man7.org/linux/man-pages/man2/mount_setattr.2.html +// +// Requires kernel >= 5.12. 
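// Illustrative sketch (assuming the new MountSetattr wrapper above and the
// package's AT_FDCWD, AT_RECURSIVE and MOUNT_ATTR_RDONLY constants, which are
// not shown in this hunk; the mount-point path is a placeholder): remounting
// a subtree read-only on Linux >= 5.12.
package example

import "golang.org/x/sys/unix"

func makeReadOnly(path string) error {
	attr := &unix.MountAttr{Attr_set: unix.MOUNT_ATTR_RDONLY}
	return unix.MountSetattr(unix.AT_FDCWD, path, unix.AT_RECURSIVE, attr)
}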
+func MountSetattr(dirfd int, pathname string, flags uint, attr *MountAttr) error { + return mountSetattr(dirfd, pathname, flags, attr, unsafe.Sizeof(*attr)) +} + func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { if raceenabled { raceReleaseMerge(unsafe.Pointer(&ioSync)) @@ -1820,11 +1816,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Dup(oldfd int) (fd int, err error) func Dup2(oldfd, newfd int) error { - // Android O and newer blocks dup2; riscv and arm64 don't implement dup2. - if runtime.GOOS == "android" || runtime.GOARCH == "riscv64" || runtime.GOARCH == "arm64" { - return Dup3(oldfd, newfd, 0) - } - return dup2(oldfd, newfd) + return Dup3(oldfd, newfd, 0) } //sys Dup3(oldfd int, newfd int, flags int) (err error) @@ -2315,6 +2307,11 @@ type RemoteIovec struct { //sys PidfdOpen(pid int, flags int) (fd int, err error) = SYS_PIDFD_OPEN //sys PidfdGetfd(pidfd int, targetfd int, flags int) (fd int, err error) = SYS_PIDFD_GETFD +//sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) +//sys shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) +//sys shmdt(addr uintptr) (err error) +//sys shmget(key int, size int, flag int) (id int, err error) + /* * Unimplemented */ @@ -2396,10 +2393,6 @@ type RemoteIovec struct { // SetRobustList // SetThreadArea // SetTidAddress -// Shmat -// Shmctl -// Shmdt -// Shmget // Sigaltstack // Swapoff // Swapon diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index 9ce92216ce..5f757e8aa7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -21,7 +21,6 @@ func setTimeval(sec, usec int64) Timeval { // 64-bit file system and 32-bit uid calls // (386 default is 32-bit file system and 16-bit uid). 
-//sys dup2(oldfd int, newfd int) (err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64_64 //sys Fchown(fd int, uid int, gid int) (err error) = SYS_FCHOWN32 @@ -353,12 +352,3 @@ func (cmsg *Cmsghdr) SetLen(length int) { func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { rsa.Service_name_len = uint32(length) } - -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) - -func Poll(fds []PollFd, timeout int) (n int, err error) { - if len(fds) == 0 { - return poll(nil, 0, timeout) - } - return poll(&fds[0], len(fds), timeout) -} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index f18b63a8fa..4299125aa7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -7,7 +7,6 @@ package unix -//sys dup2(oldfd int, newfd int) (err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) @@ -138,15 +137,6 @@ func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { rsa.Service_name_len = uint64(length) } -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) - -func Poll(fds []PollFd, timeout int) (n int, err error) { - if len(fds) == 0 { - return poll(nil, 0, timeout) - } - return poll(&fds[0], len(fds), timeout) -} - //sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index 7d1dbd3deb..79edeb9cb1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -46,7 +46,6 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // 64-bit file system and 32-bit uid calls // (16-bit uid calls are not always supported in newer kernels) -//sys dup2(oldfd int, newfd int) (err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fchown(fd int, uid int, gid int) (err error) = SYS_FCHOWN32 //sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 @@ -228,15 +227,6 @@ func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { rsa.Service_name_len = uint32(length) } -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) - -func Poll(fds []PollFd, timeout int) (n int, err error) { - if len(fds) == 0 { - return poll(nil, 0, timeout) - } - return poll(&fds[0], len(fds), timeout) -} - //sys armSyncFileRange(fd int, flags int, off int64, n int64) (err error) = SYS_ARM_SYNC_FILE_RANGE func SyncFileRange(fd int, off int64, n int64, flags int) error { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index 75b40a9b83..862890de29 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -180,27 +180,11 @@ func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { rsa.Service_name_len = uint64(length) } -// dup2 exists because func Dup3 in syscall_linux.go references -// it in an unreachable path. dup2 isn't available on arm64. 
-func dup2(oldfd int, newfd int) error - func Pause() error { _, err := ppoll(nil, 0, nil, nil) return err } -func Poll(fds []PollFd, timeout int) (n int, err error) { - var ts *Timespec - if timeout >= 0 { - ts = new(Timespec) - *ts = NsecToTimespec(int64(timeout) * 1e6) - } - if len(fds) == 0 { - return ppoll(nil, 0, ts, nil) - } - return ppoll(&fds[0], len(fds), ts, nil) -} - //sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index 36b27529a5..8932e34ad2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -8,7 +8,6 @@ package unix -//sys dup2(oldfd int, newfd int) (err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) @@ -195,12 +194,3 @@ func (cmsg *Cmsghdr) SetLen(length int) { func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { rsa.Service_name_len = uint64(length) } - -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) - -func Poll(fds []PollFd, timeout int) (n int, err error) { - if len(fds) == 0 { - return poll(nil, 0, timeout) - } - return poll(&fds[0], len(fds), timeout) -} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index d0b8b232a0..7821c25d9f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -15,7 +15,6 @@ import ( func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) -//sys dup2(oldfd int, newfd int) (err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) @@ -207,12 +206,3 @@ func (cmsg *Cmsghdr) SetLen(length int) { func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { rsa.Service_name_len = uint32(length) } - -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) - -func Poll(fds []PollFd, timeout int) (n int, err error) { - if len(fds) == 0 { - return poll(nil, 0, timeout) - } - return poll(&fds[0], len(fds), timeout) -} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go index c28fa34d38..c5053a0f03 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go @@ -12,7 +12,6 @@ import ( "unsafe" ) -//sys dup2(oldfd int, newfd int) (err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fchown(fd int, uid int, gid int) (err error) //sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 @@ -216,15 +215,6 @@ func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { rsa.Service_name_len = uint32(length) } -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) - -func Poll(fds []PollFd, timeout int) (n int, err error) { - if len(fds) == 0 { - return poll(nil, 0, timeout) - } - return poll(&fds[0], len(fds), timeout) -} - //sys syncFileRange2(fd int, flags int, off int64, n int64) (err error) 
= SYS_SYNC_FILE_RANGE2 func SyncFileRange(fd int, off int64, n int64, flags int) error { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 2ba4aa7616..25786c4216 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -8,7 +8,6 @@ package unix -//sys dup2(oldfd int, newfd int) (err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) @@ -102,15 +101,6 @@ func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { rsa.Service_name_len = uint64(length) } -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) - -func Poll(fds []PollFd, timeout int) (n int, err error) { - if len(fds) == 0 { - return poll(nil, 0, timeout) - } - return poll(&fds[0], len(fds), timeout) -} - //sys syncFileRange2(fd int, flags int, off int64, n int64) (err error) = SYS_SYNC_FILE_RANGE2 func SyncFileRange(fd int, off int64, n int64, flags int) error { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 97468c5961..6f9f710414 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -166,18 +166,6 @@ func Pause() error { return err } -func Poll(fds []PollFd, timeout int) (n int, err error) { - var ts *Timespec - if timeout >= 0 { - ts = new(Timespec) - *ts = NsecToTimespec(int64(timeout) * 1e6) - } - if len(fds) == 0 { - return ppoll(nil, 0, ts, nil) - } - return ppoll(&fds[0], len(fds), ts, nil) -} - func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { return Renameat2(olddirfd, oldpath, newdirfd, newpath, 0) } @@ -194,7 +182,3 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } - -// dup2 exists because func Dup3 in syscall_linux.go references -// it in an unreachable path. dup2 isn't available on arm64. 
-func dup2(oldfd int, newfd int) error diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index 3236feec4c..6aa59cb270 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -11,7 +11,6 @@ import ( "unsafe" ) -//sys dup2(oldfd int, newfd int) (err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) @@ -298,15 +297,6 @@ func Shutdown(s, how int) error { return nil } -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) - -func Poll(fds []PollFd, timeout int) (n int, err error) { - if len(fds) == 0 { - return poll(nil, 0, timeout) - } - return poll(&fds[0], len(fds), timeout) -} - //sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index 0544d9677a..bbe8d174f8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -9,7 +9,6 @@ package unix //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 -//sys dup2(oldfd int, newfd int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) //sys Fstat(fd int, stat *Stat_t) (err error) //sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 @@ -118,12 +117,3 @@ func (cmsg *Cmsghdr) SetLen(length int) { func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { rsa.Service_name_len = uint64(length) } - -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) - -func Poll(fds []PollFd, timeout int) (n int, err error) { - if len(fds) == 0 { - return poll(nil, 0, timeout) - } - return poll(&fds[0], len(fds), timeout) -} diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index d2a6495c7e..8b88ac2133 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -92,9 +92,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil } @@ -107,9 +105,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil } @@ -417,9 +413,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil case AF_INET6: @@ -428,9 +422,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) 
sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil } return nil, EAFNOSUPPORT diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index 1ffd8bfcfb..5fb76a1468 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -67,9 +67,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -83,9 +81,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -144,9 +140,7 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) { sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil case AF_INET6: @@ -155,9 +149,7 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil } return nil, EAFNOSUPPORT diff --git a/vendor/golang.org/x/sys/unix/sysvshm_linux.go b/vendor/golang.org/x/sys/unix/sysvshm_linux.go new file mode 100644 index 0000000000..2c3a4437f0 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/sysvshm_linux.go @@ -0,0 +1,21 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux +// +build linux + +package unix + +import "runtime" + +// SysvShmCtl performs control operations on the shared memory segment +// specified by id. +func SysvShmCtl(id, cmd int, desc *SysvShmDesc) (result int, err error) { + if runtime.GOARCH == "arm" || + runtime.GOARCH == "mips64" || runtime.GOARCH == "mips64le" { + cmd |= ipc_64 + } + + return shmctl(id, cmd, desc) +} diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix.go b/vendor/golang.org/x/sys/unix/sysvshm_unix.go new file mode 100644 index 0000000000..0bb4c8de55 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix.go @@ -0,0 +1,61 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (darwin && !ios) || linux +// +build darwin,!ios linux + +package unix + +import ( + "unsafe" + + "golang.org/x/sys/internal/unsafeheader" +) + +// SysvShmAttach attaches the Sysv shared memory segment associated with the +// shared memory identifier id. 
+func SysvShmAttach(id int, addr uintptr, flag int) ([]byte, error) {
+	addr, errno := shmat(id, addr, flag)
+	if errno != nil {
+		return nil, errno
+	}
+
+	// Retrieve the size of the shared memory to enable slice creation
+	var info SysvShmDesc
+
+	_, err := SysvShmCtl(id, IPC_STAT, &info)
+	if err != nil {
+		// release the shared memory if we can't find the size
+
+		// ignoring error from shmdt as there's nothing sensible to return here
+		shmdt(addr)
+		return nil, err
+	}
+
+	// Use unsafe to convert addr into a []byte.
+	// TODO: convert to unsafe.Slice once we can assume Go 1.17
+	var b []byte
+	hdr := (*unsafeheader.Slice)(unsafe.Pointer(&b))
+	hdr.Data = unsafe.Pointer(addr)
+	hdr.Cap = int(info.Segsz)
+	hdr.Len = int(info.Segsz)
+	return b, nil
+}
+
+// SysvShmDetach unmaps the shared memory slice returned from SysvShmAttach.
+//
+// It is not safe to use the slice after calling this function.
+func SysvShmDetach(data []byte) error {
+	if len(data) == 0 {
+		return EINVAL
+	}
+
+	return shmdt(uintptr(unsafe.Pointer(&data[0])))
+}
+
+// SysvShmGet returns the Sysv shared memory identifier associated with key.
+// If the IPC_CREAT flag is specified a new segment is created.
+func SysvShmGet(key, size, flag int) (id int, err error) {
+	return shmget(key, size, flag)
+}
diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go
new file mode 100644
index 0000000000..71bddefdb8
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go
@@ -0,0 +1,14 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin && !ios
+// +build darwin,!ios
+
+package unix
+
+// SysvShmCtl performs control operations on the shared memory segment
+// specified by id.
+func SysvShmCtl(id, cmd int, desc *SysvShmDesc) (result int, err error) { + return shmctl(id, cmd, desc) +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index a3a45fec59..476a1c7e77 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -12,1556 +12,1582 @@ package unix import "syscall" const ( - AF_APPLETALK = 0x10 - AF_CCITT = 0xa - AF_CHAOS = 0x5 - AF_CNT = 0x15 - AF_COIP = 0x14 - AF_DATAKIT = 0x9 - AF_DECnet = 0xc - AF_DLI = 0xd - AF_E164 = 0x1c - AF_ECMA = 0x8 - AF_HYLINK = 0xf - AF_IEEE80211 = 0x25 - AF_IMPLINK = 0x3 - AF_INET = 0x2 - AF_INET6 = 0x1e - AF_IPX = 0x17 - AF_ISDN = 0x1c - AF_ISO = 0x7 - AF_LAT = 0xe - AF_LINK = 0x12 - AF_LOCAL = 0x1 - AF_MAX = 0x29 - AF_NATM = 0x1f - AF_NDRV = 0x1b - AF_NETBIOS = 0x21 - AF_NS = 0x6 - AF_OSI = 0x7 - AF_PPP = 0x22 - AF_PUP = 0x4 - AF_RESERVED_36 = 0x24 - AF_ROUTE = 0x11 - AF_SIP = 0x18 - AF_SNA = 0xb - AF_SYSTEM = 0x20 - AF_SYS_CONTROL = 0x2 - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_UTUN = 0x26 - AF_VSOCK = 0x28 - ALTWERASE = 0x200 - ATTR_BIT_MAP_COUNT = 0x5 - ATTR_CMN_ACCESSMASK = 0x20000 - ATTR_CMN_ACCTIME = 0x1000 - ATTR_CMN_ADDEDTIME = 0x10000000 - ATTR_CMN_BKUPTIME = 0x2000 - ATTR_CMN_CHGTIME = 0x800 - ATTR_CMN_CRTIME = 0x200 - ATTR_CMN_DATA_PROTECT_FLAGS = 0x40000000 - ATTR_CMN_DEVID = 0x2 - ATTR_CMN_DOCUMENT_ID = 0x100000 - ATTR_CMN_ERROR = 0x20000000 - ATTR_CMN_EXTENDED_SECURITY = 0x400000 - ATTR_CMN_FILEID = 0x2000000 - ATTR_CMN_FLAGS = 0x40000 - ATTR_CMN_FNDRINFO = 0x4000 - ATTR_CMN_FSID = 0x4 - ATTR_CMN_FULLPATH = 0x8000000 - ATTR_CMN_GEN_COUNT = 0x80000 - ATTR_CMN_GRPID = 0x10000 - ATTR_CMN_GRPUUID = 0x1000000 - ATTR_CMN_MODTIME = 0x400 - ATTR_CMN_NAME = 0x1 - ATTR_CMN_NAMEDATTRCOUNT = 0x80000 - ATTR_CMN_NAMEDATTRLIST = 0x100000 - ATTR_CMN_OBJID = 0x20 - ATTR_CMN_OBJPERMANENTID = 0x40 - ATTR_CMN_OBJTAG = 0x10 - ATTR_CMN_OBJTYPE = 0x8 - ATTR_CMN_OWNERID = 0x8000 - ATTR_CMN_PARENTID = 0x4000000 - ATTR_CMN_PAROBJID = 0x80 - ATTR_CMN_RETURNED_ATTRS = 0x80000000 - ATTR_CMN_SCRIPT = 0x100 - ATTR_CMN_SETMASK = 0x51c7ff00 - ATTR_CMN_USERACCESS = 0x200000 - ATTR_CMN_UUID = 0x800000 - ATTR_CMN_VALIDMASK = 0xffffffff - ATTR_CMN_VOLSETMASK = 0x6700 - ATTR_FILE_ALLOCSIZE = 0x4 - ATTR_FILE_CLUMPSIZE = 0x10 - ATTR_FILE_DATAALLOCSIZE = 0x400 - ATTR_FILE_DATAEXTENTS = 0x800 - ATTR_FILE_DATALENGTH = 0x200 - ATTR_FILE_DEVTYPE = 0x20 - ATTR_FILE_FILETYPE = 0x40 - ATTR_FILE_FORKCOUNT = 0x80 - ATTR_FILE_FORKLIST = 0x100 - ATTR_FILE_IOBLOCKSIZE = 0x8 - ATTR_FILE_LINKCOUNT = 0x1 - ATTR_FILE_RSRCALLOCSIZE = 0x2000 - ATTR_FILE_RSRCEXTENTS = 0x4000 - ATTR_FILE_RSRCLENGTH = 0x1000 - ATTR_FILE_SETMASK = 0x20 - ATTR_FILE_TOTALSIZE = 0x2 - ATTR_FILE_VALIDMASK = 0x37ff - ATTR_VOL_ALLOCATIONCLUMP = 0x40 - ATTR_VOL_ATTRIBUTES = 0x40000000 - ATTR_VOL_CAPABILITIES = 0x20000 - ATTR_VOL_DIRCOUNT = 0x400 - ATTR_VOL_ENCODINGSUSED = 0x10000 - ATTR_VOL_FILECOUNT = 0x200 - ATTR_VOL_FSTYPE = 0x1 - ATTR_VOL_INFO = 0x80000000 - ATTR_VOL_IOBLOCKSIZE = 0x80 - ATTR_VOL_MAXOBJCOUNT = 0x800 - ATTR_VOL_MINALLOCATION = 0x20 - ATTR_VOL_MOUNTEDDEVICE = 0x8000 - ATTR_VOL_MOUNTFLAGS = 0x4000 - ATTR_VOL_MOUNTPOINT = 0x1000 - ATTR_VOL_NAME = 0x2000 - ATTR_VOL_OBJCOUNT = 0x100 - ATTR_VOL_QUOTA_SIZE = 0x10000000 - ATTR_VOL_RESERVED_SIZE = 0x20000000 - ATTR_VOL_SETMASK = 0x80002000 - ATTR_VOL_SIGNATURE = 0x2 - ATTR_VOL_SIZE = 0x4 - ATTR_VOL_SPACEAVAIL = 0x10 - ATTR_VOL_SPACEFREE = 0x8 - ATTR_VOL_UUID = 0x40000 - ATTR_VOL_VALIDMASK = 0xf007ffff 
- B0 = 0x0 - B110 = 0x6e - B115200 = 0x1c200 - B1200 = 0x4b0 - B134 = 0x86 - B14400 = 0x3840 - B150 = 0x96 - B1800 = 0x708 - B19200 = 0x4b00 - B200 = 0xc8 - B230400 = 0x38400 - B2400 = 0x960 - B28800 = 0x7080 - B300 = 0x12c - B38400 = 0x9600 - B4800 = 0x12c0 - B50 = 0x32 - B57600 = 0xe100 - B600 = 0x258 - B7200 = 0x1c20 - B75 = 0x4b - B76800 = 0x12c00 - B9600 = 0x2580 - BIOCFLUSH = 0x20004268 - BIOCGBLEN = 0x40044266 - BIOCGDLT = 0x4004426a - BIOCGDLTLIST = 0xc00c4279 - BIOCGETIF = 0x4020426b - BIOCGHDRCMPLT = 0x40044274 - BIOCGRSIG = 0x40044272 - BIOCGRTIMEOUT = 0x4010426e - BIOCGSEESENT = 0x40044276 - BIOCGSTATS = 0x4008426f - BIOCIMMEDIATE = 0x80044270 - BIOCPROMISC = 0x20004269 - BIOCSBLEN = 0xc0044266 - BIOCSDLT = 0x80044278 - BIOCSETF = 0x80104267 - BIOCSETFNR = 0x8010427e - BIOCSETIF = 0x8020426c - BIOCSHDRCMPLT = 0x80044275 - BIOCSRSIG = 0x80044273 - BIOCSRTIMEOUT = 0x8010426d - BIOCSSEESENT = 0x80044277 - BIOCVERSION = 0x40044271 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ALIGNMENT = 0x4 - BPF_ALU = 0x4 - BPF_AND = 0x50 - BPF_B = 0x10 - BPF_DIV = 0x30 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JMP = 0x5 - BPF_JSET = 0x40 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXBUFSIZE = 0x80000 - BPF_MAXINSNS = 0x200 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINBUFSIZE = 0x20 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_OR = 0x40 - BPF_RELEASE = 0x30bb6 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAX = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x8000 - BSDLY = 0x8000 - CFLUSH = 0xf - CLOCAL = 0x8000 - CLOCK_MONOTONIC = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_MONOTONIC_RAW_APPROX = 0x5 - CLOCK_PROCESS_CPUTIME_ID = 0xc - CLOCK_REALTIME = 0x0 - CLOCK_THREAD_CPUTIME_ID = 0x10 - CLOCK_UPTIME_RAW = 0x8 - CLOCK_UPTIME_RAW_APPROX = 0x9 - CLONE_NOFOLLOW = 0x1 - CLONE_NOOWNERCOPY = 0x2 - CR0 = 0x0 - CR1 = 0x1000 - CR2 = 0x2000 - CR3 = 0x3000 - CRDLY = 0x3000 - CREAD = 0x800 - CRTSCTS = 0x30000 - CS5 = 0x0 - CS6 = 0x100 - CS7 = 0x200 - CS8 = 0x300 - CSIZE = 0x300 - CSTART = 0x11 - CSTATUS = 0x14 - CSTOP = 0x13 - CSTOPB = 0x400 - CSUSP = 0x1a - CTLIOCGINFO = 0xc0644e03 - CTL_HW = 0x6 - CTL_KERN = 0x1 - CTL_MAXNAME = 0xc - CTL_NET = 0x4 - DLT_A429 = 0xb8 - DLT_A653_ICM = 0xb9 - DLT_AIRONET_HEADER = 0x78 - DLT_AOS = 0xde - DLT_APPLE_IP_OVER_IEEE1394 = 0x8a - DLT_ARCNET = 0x7 - DLT_ARCNET_LINUX = 0x81 - DLT_ATM_CLIP = 0x13 - DLT_ATM_RFC1483 = 0xb - DLT_AURORA = 0x7e - DLT_AX25 = 0x3 - DLT_AX25_KISS = 0xca - DLT_BACNET_MS_TP = 0xa5 - DLT_BLUETOOTH_HCI_H4 = 0xbb - DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 - DLT_CAN20B = 0xbe - DLT_CAN_SOCKETCAN = 0xe3 - DLT_CHAOS = 0x5 - DLT_CHDLC = 0x68 - DLT_CISCO_IOS = 0x76 - DLT_C_HDLC = 0x68 - DLT_C_HDLC_WITH_DIR = 0xcd - DLT_DBUS = 0xe7 - DLT_DECT = 0xdd - DLT_DOCSIS = 0x8f - DLT_DVB_CI = 0xeb - DLT_ECONET = 0x73 - DLT_EN10MB = 0x1 - DLT_EN3MB = 0x2 - DLT_ENC = 0x6d - DLT_ERF = 0xc5 - DLT_ERF_ETH = 0xaf - DLT_ERF_POS = 0xb0 - DLT_FC_2 = 0xe0 - DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 - DLT_FDDI = 0xa - DLT_FLEXRAY = 0xd2 - DLT_FRELAY = 0x6b - DLT_FRELAY_WITH_DIR = 0xce - DLT_GCOM_SERIAL = 0xad - DLT_GCOM_T1E1 = 0xac - DLT_GPF_F = 0xab - DLT_GPF_T = 0xaa - DLT_GPRS_LLC = 0xa9 - DLT_GSMTAP_ABIS = 0xda - DLT_GSMTAP_UM = 0xd9 - DLT_HHDLC = 0x79 - DLT_IBM_SN = 0x92 - DLT_IBM_SP = 0x91 - 
DLT_IEEE802 = 0x6 - DLT_IEEE802_11 = 0x69 - DLT_IEEE802_11_RADIO = 0x7f - DLT_IEEE802_11_RADIO_AVS = 0xa3 - DLT_IEEE802_15_4 = 0xc3 - DLT_IEEE802_15_4_LINUX = 0xbf - DLT_IEEE802_15_4_NOFCS = 0xe6 - DLT_IEEE802_15_4_NONASK_PHY = 0xd7 - DLT_IEEE802_16_MAC_CPS = 0xbc - DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 - DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 - DLT_IPMB_LINUX = 0xd1 - DLT_IPNET = 0xe2 - DLT_IPOIB = 0xf2 - DLT_IPV4 = 0xe4 - DLT_IPV6 = 0xe5 - DLT_IP_OVER_FC = 0x7a - DLT_JUNIPER_ATM1 = 0x89 - DLT_JUNIPER_ATM2 = 0x87 - DLT_JUNIPER_ATM_CEMIC = 0xee - DLT_JUNIPER_CHDLC = 0xb5 - DLT_JUNIPER_ES = 0x84 - DLT_JUNIPER_ETHER = 0xb2 - DLT_JUNIPER_FIBRECHANNEL = 0xea - DLT_JUNIPER_FRELAY = 0xb4 - DLT_JUNIPER_GGSN = 0x85 - DLT_JUNIPER_ISM = 0xc2 - DLT_JUNIPER_MFR = 0x86 - DLT_JUNIPER_MLFR = 0x83 - DLT_JUNIPER_MLPPP = 0x82 - DLT_JUNIPER_MONITOR = 0xa4 - DLT_JUNIPER_PIC_PEER = 0xae - DLT_JUNIPER_PPP = 0xb3 - DLT_JUNIPER_PPPOE = 0xa7 - DLT_JUNIPER_PPPOE_ATM = 0xa8 - DLT_JUNIPER_SERVICES = 0x88 - DLT_JUNIPER_SRX_E2E = 0xe9 - DLT_JUNIPER_ST = 0xc8 - DLT_JUNIPER_VP = 0xb7 - DLT_JUNIPER_VS = 0xe8 - DLT_LAPB_WITH_DIR = 0xcf - DLT_LAPD = 0xcb - DLT_LIN = 0xd4 - DLT_LINUX_EVDEV = 0xd8 - DLT_LINUX_IRDA = 0x90 - DLT_LINUX_LAPD = 0xb1 - DLT_LINUX_PPP_WITHDIRECTION = 0xa6 - DLT_LINUX_SLL = 0x71 - DLT_LOOP = 0x6c - DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x10a - DLT_MATCHING_MIN = 0x68 - DLT_MFR = 0xb6 - DLT_MOST = 0xd3 - DLT_MPEG_2_TS = 0xf3 - DLT_MPLS = 0xdb - DLT_MTP2 = 0x8c - DLT_MTP2_WITH_PHDR = 0x8b - DLT_MTP3 = 0x8d - DLT_MUX27010 = 0xec - DLT_NETANALYZER = 0xf0 - DLT_NETANALYZER_TRANSPARENT = 0xf1 - DLT_NFC_LLCP = 0xf5 - DLT_NFLOG = 0xef - DLT_NG40 = 0xf4 - DLT_NULL = 0x0 - DLT_PCI_EXP = 0x7d - DLT_PFLOG = 0x75 - DLT_PFSYNC = 0x12 - DLT_PPI = 0xc0 - DLT_PPP = 0x9 - DLT_PPP_BSDOS = 0x10 - DLT_PPP_ETHER = 0x33 - DLT_PPP_PPPD = 0xa6 - DLT_PPP_SERIAL = 0x32 - DLT_PPP_WITH_DIR = 0xcc - DLT_PPP_WITH_DIRECTION = 0xa6 - DLT_PRISM_HEADER = 0x77 - DLT_PRONET = 0x4 - DLT_RAIF1 = 0xc6 - DLT_RAW = 0xc - DLT_RIO = 0x7c - DLT_SCCP = 0x8e - DLT_SITA = 0xc4 - DLT_SLIP = 0x8 - DLT_SLIP_BSDOS = 0xf - DLT_STANAG_5066_D_PDU = 0xed - DLT_SUNATM = 0x7b - DLT_SYMANTEC_FIREWALL = 0x63 - DLT_TZSP = 0x80 - DLT_USB = 0xba - DLT_USB_DARWIN = 0x10a - DLT_USB_LINUX = 0xbd - DLT_USB_LINUX_MMAPPED = 0xdc - DLT_USER0 = 0x93 - DLT_USER1 = 0x94 - DLT_USER10 = 0x9d - DLT_USER11 = 0x9e - DLT_USER12 = 0x9f - DLT_USER13 = 0xa0 - DLT_USER14 = 0xa1 - DLT_USER15 = 0xa2 - DLT_USER2 = 0x95 - DLT_USER3 = 0x96 - DLT_USER4 = 0x97 - DLT_USER5 = 0x98 - DLT_USER6 = 0x99 - DLT_USER7 = 0x9a - DLT_USER8 = 0x9b - DLT_USER9 = 0x9c - DLT_WIHART = 0xdf - DLT_X2E_SERIAL = 0xd5 - DLT_X2E_XORAYA = 0xd6 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x40 - ECHOE = 0x2 - ECHOK = 0x4 - ECHOKE = 0x1 - ECHONL = 0x10 - ECHOPRT = 0x20 - EVFILT_AIO = -0x3 - EVFILT_EXCEPT = -0xf - EVFILT_FS = -0x9 - EVFILT_MACHPORT = -0x8 - EVFILT_PROC = -0x5 - EVFILT_READ = -0x1 - EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0x11 - EVFILT_THREADMARKER = 0x11 - EVFILT_TIMER = -0x7 - EVFILT_USER = -0xa - EVFILT_VM = -0xc - EVFILT_VNODE = -0x4 - EVFILT_WRITE = -0x2 - EV_ADD = 0x1 - EV_CLEAR = 0x20 - EV_DELETE = 0x2 - EV_DISABLE = 0x8 - EV_DISPATCH = 0x80 - EV_DISPATCH2 = 0x180 - EV_ENABLE = 0x4 - EV_EOF = 0x8000 - EV_ERROR = 0x4000 - EV_FLAG0 = 0x1000 - EV_FLAG1 = 0x2000 - EV_ONESHOT = 0x10 - EV_OOBAND = 0x2000 - EV_POLL = 0x1000 - EV_RECEIPT = 0x40 - EV_SYSFLAGS = 0xf000 - 
EV_UDATA_SPECIFIC = 0x100 - EV_VANISHED = 0x200 - EXTA = 0x4b00 - EXTB = 0x9600 - EXTPROC = 0x800 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x4000 - FFDLY = 0x4000 - FLUSHO = 0x800000 - FSOPT_ATTR_CMN_EXTENDED = 0x20 - FSOPT_NOFOLLOW = 0x1 - FSOPT_NOINMEMUPDATE = 0x2 - FSOPT_PACK_INVAL_ATTRS = 0x8 - FSOPT_REPORT_FULLSIZE = 0x4 - FSOPT_RETURN_REALDEV = 0x200 - F_ADDFILESIGS = 0x3d - F_ADDFILESIGS_FOR_DYLD_SIM = 0x53 - F_ADDFILESIGS_INFO = 0x67 - F_ADDFILESIGS_RETURN = 0x61 - F_ADDFILESUPPL = 0x68 - F_ADDSIGS = 0x3b - F_ALLOCATEALL = 0x4 - F_ALLOCATECONTIG = 0x2 - F_BARRIERFSYNC = 0x55 - F_CHECK_LV = 0x62 - F_CHKCLEAN = 0x29 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x43 - F_FINDSIGS = 0x4e - F_FLUSH_DATA = 0x28 - F_FREEZE_FS = 0x35 - F_FULLFSYNC = 0x33 - F_GETCODEDIR = 0x48 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLK = 0x7 - F_GETLKPID = 0x42 - F_GETNOSIGPIPE = 0x4a - F_GETOWN = 0x5 - F_GETPATH = 0x32 - F_GETPATH_MTMINFO = 0x47 - F_GETPATH_NOFIRMLINK = 0x66 - F_GETPROTECTIONCLASS = 0x3f - F_GETPROTECTIONLEVEL = 0x4d - F_GETSIGSINFO = 0x69 - F_GLOBAL_NOCACHE = 0x37 - F_LOG2PHYS = 0x31 - F_LOG2PHYS_EXT = 0x41 - F_NOCACHE = 0x30 - F_NODIRECT = 0x3e - F_OK = 0x0 - F_PATHPKG_CHECK = 0x34 - F_PEOFPOSMODE = 0x3 - F_PREALLOCATE = 0x2a - F_PUNCHHOLE = 0x63 - F_RDADVISE = 0x2c - F_RDAHEAD = 0x2d - F_RDLCK = 0x1 - F_SETBACKINGSTORE = 0x46 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLK = 0x8 - F_SETLKW = 0x9 - F_SETLKWTIMEOUT = 0xa - F_SETNOSIGPIPE = 0x49 - F_SETOWN = 0x6 - F_SETPROTECTIONCLASS = 0x40 - F_SETSIZE = 0x2b - F_SINGLE_WRITER = 0x4c - F_SPECULATIVE_READ = 0x65 - F_THAW_FS = 0x36 - F_TRANSCODEKEY = 0x4b - F_TRIM_ACTIVE_FILE = 0x64 - F_UNLCK = 0x2 - F_VOLPOSMODE = 0x4 - F_WRLCK = 0x3 - HUPCL = 0x4000 - HW_MACHINE = 0x1 - ICANON = 0x100 - ICMP6_FILTER = 0x12 - ICRNL = 0x100 - IEXTEN = 0x400 - IFF_ALLMULTI = 0x200 - IFF_ALTPHYS = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_LINK0 = 0x1000 - IFF_LINK1 = 0x2000 - IFF_LINK2 = 0x4000 - IFF_LOOPBACK = 0x8 - IFF_MULTICAST = 0x8000 - IFF_NOARP = 0x80 - IFF_NOTRAILERS = 0x20 - IFF_OACTIVE = 0x400 - IFF_POINTOPOINT = 0x10 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SIMPLEX = 0x800 - IFF_UP = 0x1 - IFNAMSIZ = 0x10 - IFT_1822 = 0x2 - IFT_6LOWPAN = 0x40 - IFT_AAL5 = 0x31 - IFT_ARCNET = 0x23 - IFT_ARCNETPLUS = 0x24 - IFT_ATM = 0x25 - IFT_BRIDGE = 0xd1 - IFT_CARP = 0xf8 - IFT_CELLULAR = 0xff - IFT_CEPT = 0x13 - IFT_DS3 = 0x1e - IFT_ENC = 0xf4 - IFT_EON = 0x19 - IFT_ETHER = 0x6 - IFT_FAITH = 0x38 - IFT_FDDI = 0xf - IFT_FRELAY = 0x20 - IFT_FRELAYDCE = 0x2c - IFT_GIF = 0x37 - IFT_HDH1822 = 0x3 - IFT_HIPPI = 0x2f - IFT_HSSI = 0x2e - IFT_HY = 0xe - IFT_IEEE1394 = 0x90 - IFT_IEEE8023ADLAG = 0x88 - IFT_ISDNBASIC = 0x14 - IFT_ISDNPRIMARY = 0x15 - IFT_ISO88022LLC = 0x29 - IFT_ISO88023 = 0x7 - IFT_ISO88024 = 0x8 - IFT_ISO88025 = 0x9 - IFT_ISO88026 = 0xa - IFT_L2VLAN = 0x87 - IFT_LAPB = 0x10 - IFT_LOCALTALK = 0x2a - IFT_LOOP = 0x18 - IFT_MIOX25 = 0x26 - IFT_MODEM = 0x30 - IFT_NSIP = 0x1b - IFT_OTHER = 0x1 - IFT_P10 = 0xc - IFT_P80 = 0xd - IFT_PARA = 0x22 - IFT_PDP = 0xff - IFT_PFLOG = 0xf5 - IFT_PFSYNC = 0xf6 - IFT_PKTAP = 0xfe - IFT_PPP = 0x17 - IFT_PROPMUX = 0x36 - IFT_PROPVIRTUAL = 0x35 - IFT_PTPSERIAL = 0x16 - IFT_RS232 = 0x21 - IFT_SDLC = 0x11 - IFT_SIP = 0x1f - IFT_SLIP = 0x1c - IFT_SMDSDXI = 0x2b - IFT_SMDSICIP = 0x34 - IFT_SONET = 0x27 - IFT_SONETPATH = 0x32 - IFT_SONETVT = 0x33 - IFT_STARLAN = 0xb - IFT_STF = 0x39 - IFT_T1 = 0x12 - IFT_ULTRA = 0x1d - IFT_V35 = 0x2d - IFT_X25 = 0x5 - IFT_X25DDN = 0x4 - IFT_X25PLE = 0x28 - IFT_XETHER = 0x1a - 
IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLASSD_HOST = 0xfffffff - IN_CLASSD_NET = 0xf0000000 - IN_CLASSD_NSHIFT = 0x1c - IN_LINKLOCALNETNUM = 0xa9fe0000 - IN_LOOPBACKNET = 0x7f - IPPROTO_3PC = 0x22 - IPPROTO_ADFS = 0x44 - IPPROTO_AH = 0x33 - IPPROTO_AHIP = 0x3d - IPPROTO_APES = 0x63 - IPPROTO_ARGUS = 0xd - IPPROTO_AX25 = 0x5d - IPPROTO_BHA = 0x31 - IPPROTO_BLT = 0x1e - IPPROTO_BRSATMON = 0x4c - IPPROTO_CFTP = 0x3e - IPPROTO_CHAOS = 0x10 - IPPROTO_CMTP = 0x26 - IPPROTO_CPHB = 0x49 - IPPROTO_CPNX = 0x48 - IPPROTO_DDP = 0x25 - IPPROTO_DGP = 0x56 - IPPROTO_DIVERT = 0xfe - IPPROTO_DONE = 0x101 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_EMCON = 0xe - IPPROTO_ENCAP = 0x62 - IPPROTO_EON = 0x50 - IPPROTO_ESP = 0x32 - IPPROTO_ETHERIP = 0x61 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GGP = 0x3 - IPPROTO_GMTP = 0x64 - IPPROTO_GRE = 0x2f - IPPROTO_HELLO = 0x3f - IPPROTO_HMP = 0x14 - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IDPR = 0x23 - IPPROTO_IDRP = 0x2d - IPPROTO_IGMP = 0x2 - IPPROTO_IGP = 0x55 - IPPROTO_IGRP = 0x58 - IPPROTO_IL = 0x28 - IPPROTO_INLSP = 0x34 - IPPROTO_INP = 0x20 - IPPROTO_IP = 0x0 - IPPROTO_IPCOMP = 0x6c - IPPROTO_IPCV = 0x47 - IPPROTO_IPEIP = 0x5e - IPPROTO_IPIP = 0x4 - IPPROTO_IPPC = 0x43 - IPPROTO_IPV4 = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_IRTP = 0x1c - IPPROTO_KRYPTOLAN = 0x41 - IPPROTO_LARP = 0x5b - IPPROTO_LEAF1 = 0x19 - IPPROTO_LEAF2 = 0x1a - IPPROTO_MAX = 0x100 - IPPROTO_MAXID = 0x34 - IPPROTO_MEAS = 0x13 - IPPROTO_MHRP = 0x30 - IPPROTO_MICP = 0x5f - IPPROTO_MTP = 0x5c - IPPROTO_MUX = 0x12 - IPPROTO_ND = 0x4d - IPPROTO_NHRP = 0x36 - IPPROTO_NONE = 0x3b - IPPROTO_NSP = 0x1f - IPPROTO_NVPII = 0xb - IPPROTO_OSPFIGP = 0x59 - IPPROTO_PGM = 0x71 - IPPROTO_PIGP = 0x9 - IPPROTO_PIM = 0x67 - IPPROTO_PRM = 0x15 - IPPROTO_PUP = 0xc - IPPROTO_PVP = 0x4b - IPPROTO_RAW = 0xff - IPPROTO_RCCMON = 0xa - IPPROTO_RDP = 0x1b - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_RVD = 0x42 - IPPROTO_SATEXPAK = 0x40 - IPPROTO_SATMON = 0x45 - IPPROTO_SCCSP = 0x60 - IPPROTO_SCTP = 0x84 - IPPROTO_SDRP = 0x2a - IPPROTO_SEP = 0x21 - IPPROTO_SRPC = 0x5a - IPPROTO_ST = 0x7 - IPPROTO_SVMTP = 0x52 - IPPROTO_SWIPE = 0x35 - IPPROTO_TCF = 0x57 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_TPXX = 0x27 - IPPROTO_TRUNK1 = 0x17 - IPPROTO_TRUNK2 = 0x18 - IPPROTO_TTP = 0x54 - IPPROTO_UDP = 0x11 - IPPROTO_VINES = 0x53 - IPPROTO_VISA = 0x46 - IPPROTO_VMTP = 0x51 - IPPROTO_WBEXPAK = 0x4f - IPPROTO_WBMON = 0x4e - IPPROTO_WSN = 0x4a - IPPROTO_XNET = 0xf - IPPROTO_XTP = 0x24 - IPV6_2292DSTOPTS = 0x17 - IPV6_2292HOPLIMIT = 0x14 - IPV6_2292HOPOPTS = 0x16 - IPV6_2292NEXTHOP = 0x15 - IPV6_2292PKTINFO = 0x13 - IPV6_2292PKTOPTIONS = 0x19 - IPV6_2292RTHDR = 0x18 - IPV6_3542DSTOPTS = 0x32 - IPV6_3542HOPLIMIT = 0x2f - IPV6_3542HOPOPTS = 0x31 - IPV6_3542NEXTHOP = 0x30 - IPV6_3542PKTINFO = 0x2e - IPV6_3542RTHDR = 0x33 - IPV6_ADDR_MC_FLAGS_PREFIX = 0x20 - IPV6_ADDR_MC_FLAGS_TRANSIENT = 0x10 - IPV6_ADDR_MC_FLAGS_UNICAST_BASED = 0x30 - IPV6_AUTOFLOWLABEL = 0x3b - IPV6_BINDV6ONLY = 0x1b - IPV6_BOUND_IF = 0x7d - IPV6_CHECKSUM = 0x1a - IPV6_DEFAULT_MULTICAST_HOPS = 0x1 - IPV6_DEFAULT_MULTICAST_LOOP = 0x1 - IPV6_DEFHLIM = 0x40 - 
IPV6_DONTFRAG = 0x3e - IPV6_DSTOPTS = 0x32 - IPV6_FAITH = 0x1d - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 - IPV6_FLOW_ECN_MASK = 0x3000 - IPV6_FRAGTTL = 0x3c - IPV6_FW_ADD = 0x1e - IPV6_FW_DEL = 0x1f - IPV6_FW_FLUSH = 0x20 - IPV6_FW_GET = 0x22 - IPV6_FW_ZERO = 0x21 - IPV6_HLIMDEC = 0x1 - IPV6_HOPLIMIT = 0x2f - IPV6_HOPOPTS = 0x31 - IPV6_IPSEC_POLICY = 0x1c - IPV6_JOIN_GROUP = 0xc - IPV6_LEAVE_GROUP = 0xd - IPV6_MAXHLIM = 0xff - IPV6_MAXOPTHDR = 0x800 - IPV6_MAXPACKET = 0xffff - IPV6_MAX_GROUP_SRC_FILTER = 0x200 - IPV6_MAX_MEMBERSHIPS = 0xfff - IPV6_MAX_SOCK_SRC_FILTER = 0x80 - IPV6_MIN_MEMBERSHIPS = 0x1f - IPV6_MMTU = 0x500 - IPV6_MSFILTER = 0x4a - IPV6_MULTICAST_HOPS = 0xa - IPV6_MULTICAST_IF = 0x9 - IPV6_MULTICAST_LOOP = 0xb - IPV6_NEXTHOP = 0x30 - IPV6_PATHMTU = 0x2c - IPV6_PKTINFO = 0x2e - IPV6_PORTRANGE = 0xe - IPV6_PORTRANGE_DEFAULT = 0x0 - IPV6_PORTRANGE_HIGH = 0x1 - IPV6_PORTRANGE_LOW = 0x2 - IPV6_PREFER_TEMPADDR = 0x3f - IPV6_RECVDSTOPTS = 0x28 - IPV6_RECVHOPLIMIT = 0x25 - IPV6_RECVHOPOPTS = 0x27 - IPV6_RECVPATHMTU = 0x2b - IPV6_RECVPKTINFO = 0x3d - IPV6_RECVRTHDR = 0x26 - IPV6_RECVTCLASS = 0x23 - IPV6_RTHDR = 0x33 - IPV6_RTHDRDSTOPTS = 0x39 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_SOCKOPT_RESERVED1 = 0x3 - IPV6_TCLASS = 0x24 - IPV6_UNICAST_HOPS = 0x4 - IPV6_USE_MIN_MTU = 0x2a - IPV6_V6ONLY = 0x1b - IPV6_VERSION = 0x60 - IPV6_VERSION_MASK = 0xf0 - IP_ADD_MEMBERSHIP = 0xc - IP_ADD_SOURCE_MEMBERSHIP = 0x46 - IP_BLOCK_SOURCE = 0x48 - IP_BOUND_IF = 0x19 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DONTFRAG = 0x1c - IP_DROP_MEMBERSHIP = 0xd - IP_DROP_SOURCE_MEMBERSHIP = 0x47 - IP_DUMMYNET_CONFIGURE = 0x3c - IP_DUMMYNET_DEL = 0x3d - IP_DUMMYNET_FLUSH = 0x3e - IP_DUMMYNET_GET = 0x40 - IP_FAITH = 0x16 - IP_FW_ADD = 0x28 - IP_FW_DEL = 0x29 - IP_FW_FLUSH = 0x2a - IP_FW_GET = 0x2c - IP_FW_RESETLOG = 0x2d - IP_FW_ZERO = 0x2b - IP_HDRINCL = 0x2 - IP_IPSEC_POLICY = 0x15 - IP_MAXPACKET = 0xffff - IP_MAX_GROUP_SRC_FILTER = 0x200 - IP_MAX_MEMBERSHIPS = 0xfff - IP_MAX_SOCK_MUTE_FILTER = 0x80 - IP_MAX_SOCK_SRC_FILTER = 0x80 - IP_MF = 0x2000 - IP_MIN_MEMBERSHIPS = 0x1f - IP_MSFILTER = 0x4a - IP_MSS = 0x240 - IP_MULTICAST_IF = 0x9 - IP_MULTICAST_IFINDEX = 0x42 - IP_MULTICAST_LOOP = 0xb - IP_MULTICAST_TTL = 0xa - IP_MULTICAST_VIF = 0xe - IP_NAT__XXX = 0x37 - IP_OFFMASK = 0x1fff - IP_OLD_FW_ADD = 0x32 - IP_OLD_FW_DEL = 0x33 - IP_OLD_FW_FLUSH = 0x34 - IP_OLD_FW_GET = 0x36 - IP_OLD_FW_RESETLOG = 0x38 - IP_OLD_FW_ZERO = 0x35 - IP_OPTIONS = 0x1 - IP_PKTINFO = 0x1a - IP_PORTRANGE = 0x13 - IP_PORTRANGE_DEFAULT = 0x0 - IP_PORTRANGE_HIGH = 0x1 - IP_PORTRANGE_LOW = 0x2 - IP_RECVDSTADDR = 0x7 - IP_RECVIF = 0x14 - IP_RECVOPTS = 0x5 - IP_RECVPKTINFO = 0x1a - IP_RECVRETOPTS = 0x6 - IP_RECVTOS = 0x1b - IP_RECVTTL = 0x18 - IP_RETOPTS = 0x8 - IP_RF = 0x8000 - IP_RSVP_OFF = 0x10 - IP_RSVP_ON = 0xf - IP_RSVP_VIF_OFF = 0x12 - IP_RSVP_VIF_ON = 0x11 - IP_STRIPHDR = 0x17 - IP_TOS = 0x3 - IP_TRAFFIC_MGT_BACKGROUND = 0x41 - IP_TTL = 0x4 - IP_UNBLOCK_SOURCE = 0x49 - ISIG = 0x80 - ISTRIP = 0x20 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x400 - IXON = 0x200 - KERN_HOSTNAME = 0xa - KERN_OSRELEASE = 0x2 - KERN_OSTYPE = 0x1 - KERN_VERSION = 0x4 - LOCAL_PEERCRED = 0x1 - LOCAL_PEEREPID = 0x3 - LOCAL_PEEREUUID = 0x5 - LOCAL_PEERPID = 0x2 - LOCAL_PEERTOKEN = 0x6 - LOCAL_PEERUUID = 0x4 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - MADV_CAN_REUSE = 0x9 - MADV_DONTNEED = 0x4 - MADV_FREE = 0x5 - 
MADV_FREE_REUSABLE = 0x7 - MADV_FREE_REUSE = 0x8 - MADV_NORMAL = 0x0 - MADV_PAGEOUT = 0xa - MADV_RANDOM = 0x1 - MADV_SEQUENTIAL = 0x2 - MADV_WILLNEED = 0x3 - MADV_ZERO_WIRED_PAGES = 0x6 - MAP_32BIT = 0x8000 - MAP_ANON = 0x1000 - MAP_ANONYMOUS = 0x1000 - MAP_COPY = 0x2 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_HASSEMAPHORE = 0x200 - MAP_JIT = 0x800 - MAP_NOCACHE = 0x400 - MAP_NOEXTEND = 0x100 - MAP_NORESERVE = 0x40 - MAP_PRIVATE = 0x2 - MAP_RENAME = 0x20 - MAP_RESERVED0080 = 0x80 - MAP_RESILIENT_CODESIGN = 0x2000 - MAP_RESILIENT_MEDIA = 0x4000 - MAP_SHARED = 0x1 - MAP_TRANSLATED_ALLOW_EXECUTE = 0x20000 - MAP_UNIX03 = 0x40000 - MCAST_BLOCK_SOURCE = 0x54 - MCAST_EXCLUDE = 0x2 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x50 - MCAST_JOIN_SOURCE_GROUP = 0x52 - MCAST_LEAVE_GROUP = 0x51 - MCAST_LEAVE_SOURCE_GROUP = 0x53 - MCAST_UNBLOCK_SOURCE = 0x55 - MCAST_UNDEFINED = 0x0 - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MNT_ASYNC = 0x40 - MNT_AUTOMOUNTED = 0x400000 - MNT_CMDFLAGS = 0xf0000 - MNT_CPROTECT = 0x80 - MNT_DEFWRITE = 0x2000000 - MNT_DONTBROWSE = 0x100000 - MNT_DOVOLFS = 0x8000 - MNT_DWAIT = 0x4 - MNT_EXPORTED = 0x100 - MNT_EXT_ROOT_DATA_VOL = 0x1 - MNT_FORCE = 0x80000 - MNT_IGNORE_OWNERSHIP = 0x200000 - MNT_JOURNALED = 0x800000 - MNT_LOCAL = 0x1000 - MNT_MULTILABEL = 0x4000000 - MNT_NOATIME = 0x10000000 - MNT_NOBLOCK = 0x20000 - MNT_NODEV = 0x10 - MNT_NOEXEC = 0x4 - MNT_NOSUID = 0x8 - MNT_NOUSERXATTR = 0x1000000 - MNT_NOWAIT = 0x2 - MNT_QUARANTINE = 0x400 - MNT_QUOTA = 0x2000 - MNT_RDONLY = 0x1 - MNT_RELOAD = 0x40000 - MNT_REMOVABLE = 0x200 - MNT_ROOTFS = 0x4000 - MNT_SNAPSHOT = 0x40000000 - MNT_STRICTATIME = 0x80000000 - MNT_SYNCHRONOUS = 0x2 - MNT_UNION = 0x20 - MNT_UNKNOWNPERMISSIONS = 0x200000 - MNT_UPDATE = 0x10000 - MNT_VISFLAGMASK = 0xd7f0f7ff - MNT_WAIT = 0x1 - MSG_CTRUNC = 0x20 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x80 - MSG_EOF = 0x100 - MSG_EOR = 0x8 - MSG_FLUSH = 0x400 - MSG_HAVEMORE = 0x2000 - MSG_HOLD = 0x800 - MSG_NEEDSA = 0x10000 - MSG_NOSIGNAL = 0x80000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_RCVMORE = 0x4000 - MSG_SEND = 0x1000 - MSG_TRUNC = 0x10 - MSG_WAITALL = 0x40 - MSG_WAITSTREAM = 0x200 - MS_ASYNC = 0x1 - MS_DEACTIVATE = 0x8 - MS_INVALIDATE = 0x2 - MS_KILLPAGES = 0x4 - MS_SYNC = 0x10 - NAME_MAX = 0xff - NET_RT_DUMP = 0x1 - NET_RT_DUMP2 = 0x7 - NET_RT_FLAGS = 0x2 - NET_RT_FLAGS_PRIV = 0xa - NET_RT_IFLIST = 0x3 - NET_RT_IFLIST2 = 0x6 - NET_RT_MAXID = 0xb - NET_RT_STAT = 0x4 - NET_RT_TRASH = 0x5 - NFDBITS = 0x20 - NL0 = 0x0 - NL1 = 0x100 - NL2 = 0x200 - NL3 = 0x300 - NLDLY = 0x300 - NOFLSH = 0x80000000 - NOKERNINFO = 0x2000000 - NOTE_ABSOLUTE = 0x8 - NOTE_ATTRIB = 0x8 - NOTE_BACKGROUND = 0x40 - NOTE_CHILD = 0x4 - NOTE_CRITICAL = 0x20 - NOTE_DELETE = 0x1 - NOTE_EXEC = 0x20000000 - NOTE_EXIT = 0x80000000 - NOTE_EXITSTATUS = 0x4000000 - NOTE_EXIT_CSERROR = 0x40000 - NOTE_EXIT_DECRYPTFAIL = 0x10000 - NOTE_EXIT_DETAIL = 0x2000000 - NOTE_EXIT_DETAIL_MASK = 0x70000 - NOTE_EXIT_MEMORY = 0x20000 - NOTE_EXIT_REPARENTED = 0x80000 - NOTE_EXTEND = 0x4 - NOTE_FFAND = 0x40000000 - NOTE_FFCOPY = 0xc0000000 - NOTE_FFCTRLMASK = 0xc0000000 - NOTE_FFLAGSMASK = 0xffffff - NOTE_FFNOP = 0x0 - NOTE_FFOR = 0x80000000 - NOTE_FORK = 0x40000000 - NOTE_FUNLOCK = 0x100 - NOTE_LEEWAY = 0x10 - NOTE_LINK = 0x10 - NOTE_LOWAT = 0x1 - NOTE_MACHTIME = 0x100 - NOTE_MACH_CONTINUOUS_TIME = 0x80 - NOTE_NONE = 0x80 - NOTE_NSECONDS = 0x4 - NOTE_OOB = 0x2 - NOTE_PCTRLMASK = -0x100000 - NOTE_PDATAMASK = 0xfffff - NOTE_REAP = 0x10000000 - NOTE_RENAME = 0x20 - NOTE_REVOKE = 0x40 - NOTE_SECONDS = 0x1 - NOTE_SIGNAL = 
0x8000000 - NOTE_TRACK = 0x1 - NOTE_TRACKERR = 0x2 - NOTE_TRIGGER = 0x1000000 - NOTE_USECONDS = 0x2 - NOTE_VM_ERROR = 0x10000000 - NOTE_VM_PRESSURE = 0x80000000 - NOTE_VM_PRESSURE_SUDDEN_TERMINATE = 0x20000000 - NOTE_VM_PRESSURE_TERMINATE = 0x40000000 - NOTE_WRITE = 0x2 - OCRNL = 0x10 - OFDEL = 0x20000 - OFILL = 0x80 - ONLCR = 0x2 - ONLRET = 0x40 - ONOCR = 0x20 - ONOEOT = 0x8 - OPOST = 0x1 - OXTABS = 0x4 - O_ACCMODE = 0x3 - O_ALERT = 0x20000000 - O_APPEND = 0x8 - O_ASYNC = 0x40 - O_CLOEXEC = 0x1000000 - O_CREAT = 0x200 - O_DIRECTORY = 0x100000 - O_DP_GETRAWENCRYPTED = 0x1 - O_DP_GETRAWUNENCRYPTED = 0x2 - O_DSYNC = 0x400000 - O_EVTONLY = 0x8000 - O_EXCL = 0x800 - O_EXLOCK = 0x20 - O_FSYNC = 0x80 - O_NDELAY = 0x4 - O_NOCTTY = 0x20000 - O_NOFOLLOW = 0x100 - O_NOFOLLOW_ANY = 0x20000000 - O_NONBLOCK = 0x4 - O_POPUP = 0x80000000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_SHLOCK = 0x10 - O_SYMLINK = 0x200000 - O_SYNC = 0x80 - O_TRUNC = 0x400 - O_WRONLY = 0x1 - PARENB = 0x1000 - PARMRK = 0x8 - PARODD = 0x2000 - PENDIN = 0x20000000 - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROT_EXEC = 0x4 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PT_ATTACH = 0xa - PT_ATTACHEXC = 0xe - PT_CONTINUE = 0x7 - PT_DENY_ATTACH = 0x1f - PT_DETACH = 0xb - PT_FIRSTMACH = 0x20 - PT_FORCEQUOTA = 0x1e - PT_KILL = 0x8 - PT_READ_D = 0x2 - PT_READ_I = 0x1 - PT_READ_U = 0x3 - PT_SIGEXC = 0xc - PT_STEP = 0x9 - PT_THUPDATE = 0xd - PT_TRACE_ME = 0x0 - PT_WRITE_D = 0x5 - PT_WRITE_I = 0x4 - PT_WRITE_U = 0x6 - RLIMIT_AS = 0x5 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_CPU_USAGE_MONITOR = 0x2 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_MEMLOCK = 0x6 - RLIMIT_NOFILE = 0x8 - RLIMIT_NPROC = 0x7 - RLIMIT_RSS = 0x5 - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0x7fffffffffffffff - RTAX_AUTHOR = 0x6 - RTAX_BRD = 0x7 - RTAX_DST = 0x0 - RTAX_GATEWAY = 0x1 - RTAX_GENMASK = 0x3 - RTAX_IFA = 0x5 - RTAX_IFP = 0x4 - RTAX_MAX = 0x8 - RTAX_NETMASK = 0x2 - RTA_AUTHOR = 0x40 - RTA_BRD = 0x80 - RTA_DST = 0x1 - RTA_GATEWAY = 0x2 - RTA_GENMASK = 0x8 - RTA_IFA = 0x20 - RTA_IFP = 0x10 - RTA_NETMASK = 0x4 - RTF_BLACKHOLE = 0x1000 - RTF_BROADCAST = 0x400000 - RTF_CLONING = 0x100 - RTF_CONDEMNED = 0x2000000 - RTF_DEAD = 0x20000000 - RTF_DELCLONE = 0x80 - RTF_DONE = 0x40 - RTF_DYNAMIC = 0x10 - RTF_GATEWAY = 0x2 - RTF_GLOBAL = 0x40000000 - RTF_HOST = 0x4 - RTF_IFREF = 0x4000000 - RTF_IFSCOPE = 0x1000000 - RTF_LLDATA = 0x400 - RTF_LLINFO = 0x400 - RTF_LOCAL = 0x200000 - RTF_MODIFIED = 0x20 - RTF_MULTICAST = 0x800000 - RTF_NOIFREF = 0x2000 - RTF_PINNED = 0x100000 - RTF_PRCLONING = 0x10000 - RTF_PROTO1 = 0x8000 - RTF_PROTO2 = 0x4000 - RTF_PROTO3 = 0x40000 - RTF_PROXY = 0x8000000 - RTF_REJECT = 0x8 - RTF_ROUTER = 0x10000000 - RTF_STATIC = 0x800 - RTF_UP = 0x1 - RTF_WASCLONED = 0x20000 - RTF_XRESOLVE = 0x200 - RTM_ADD = 0x1 - RTM_CHANGE = 0x3 - RTM_DELADDR = 0xd - RTM_DELETE = 0x2 - RTM_DELMADDR = 0x10 - RTM_GET = 0x4 - RTM_GET2 = 0x14 - RTM_IFINFO = 0xe - RTM_IFINFO2 = 0x12 - RTM_LOCK = 0x8 - RTM_LOSING = 0x5 - RTM_MISS = 0x7 - RTM_NEWADDR = 0xc - RTM_NEWMADDR = 0xf - RTM_NEWMADDR2 = 0x13 - RTM_OLDADD = 0x9 - RTM_OLDDEL = 0xa - RTM_REDIRECT = 0x6 - RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 - RTM_VERSION = 0x5 - RTV_EXPIRE = 0x4 - RTV_HOPCOUNT = 0x2 - RTV_MTU = 0x1 - RTV_RPIPE = 0x8 - RTV_RTT = 0x40 - RTV_RTTVAR = 0x80 - RTV_SPIPE = 0x10 - RTV_SSTHRESH = 0x20 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - SCM_CREDS = 0x3 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x2 - SCM_TIMESTAMP_MONOTONIC = 0x4 - SEEK_CUR = 0x1 - SEEK_DATA = 0x4 - SEEK_END = 
0x2 - SEEK_HOLE = 0x3 - SEEK_SET = 0x0 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDMULTI = 0x80206931 - SIOCAIFADDR = 0x8040691a - SIOCARPIPLL = 0xc0206928 - SIOCATMARK = 0x40047307 - SIOCAUTOADDR = 0xc0206926 - SIOCAUTONETMASK = 0x80206927 - SIOCDELMULTI = 0x80206932 - SIOCDIFADDR = 0x80206919 - SIOCDIFPHYADDR = 0x80206941 - SIOCGDRVSPEC = 0xc028697b - SIOCGETVLAN = 0xc020697f - SIOCGHIWAT = 0x40047301 - SIOCGIF6LOWPAN = 0xc02069c5 - SIOCGIFADDR = 0xc0206921 - SIOCGIFALTMTU = 0xc0206948 - SIOCGIFASYNCMAP = 0xc020697c - SIOCGIFBOND = 0xc0206947 - SIOCGIFBRDADDR = 0xc0206923 - SIOCGIFCAP = 0xc020695b - SIOCGIFCONF = 0xc00c6924 - SIOCGIFDEVMTU = 0xc0206944 - SIOCGIFDSTADDR = 0xc0206922 - SIOCGIFFLAGS = 0xc0206911 - SIOCGIFFUNCTIONALTYPE = 0xc02069ad - SIOCGIFGENERIC = 0xc020693a - SIOCGIFKPI = 0xc0206987 - SIOCGIFMAC = 0xc0206982 - SIOCGIFMEDIA = 0xc02c6938 - SIOCGIFMETRIC = 0xc0206917 - SIOCGIFMTU = 0xc0206933 - SIOCGIFNETMASK = 0xc0206925 - SIOCGIFPDSTADDR = 0xc0206940 - SIOCGIFPHYS = 0xc0206935 - SIOCGIFPSRCADDR = 0xc020693f - SIOCGIFSTATUS = 0xc331693d - SIOCGIFVLAN = 0xc020697f - SIOCGIFWAKEFLAGS = 0xc0206988 - SIOCGIFXMEDIA = 0xc02c6948 - SIOCGLOWAT = 0x40047303 - SIOCGPGRP = 0x40047309 - SIOCIFCREATE = 0xc0206978 - SIOCIFCREATE2 = 0xc020697a - SIOCIFDESTROY = 0x80206979 - SIOCIFGCLONERS = 0xc0106981 - SIOCRSLVMULTI = 0xc010693b - SIOCSDRVSPEC = 0x8028697b - SIOCSETVLAN = 0x8020697e - SIOCSHIWAT = 0x80047300 - SIOCSIF6LOWPAN = 0x802069c4 - SIOCSIFADDR = 0x8020690c - SIOCSIFALTMTU = 0x80206945 - SIOCSIFASYNCMAP = 0x8020697d - SIOCSIFBOND = 0x80206946 - SIOCSIFBRDADDR = 0x80206913 - SIOCSIFCAP = 0x8020695a - SIOCSIFDSTADDR = 0x8020690e - SIOCSIFFLAGS = 0x80206910 - SIOCSIFGENERIC = 0x80206939 - SIOCSIFKPI = 0x80206986 - SIOCSIFLLADDR = 0x8020693c - SIOCSIFMAC = 0x80206983 - SIOCSIFMEDIA = 0xc0206937 - SIOCSIFMETRIC = 0x80206918 - SIOCSIFMTU = 0x80206934 - SIOCSIFNETMASK = 0x80206916 - SIOCSIFPHYADDR = 0x8040693e - SIOCSIFPHYS = 0x80206936 - SIOCSIFVLAN = 0x8020697e - SIOCSLOWAT = 0x80047302 - SIOCSPGRP = 0x80047308 - SOCK_DGRAM = 0x2 - SOCK_MAXADDRLEN = 0xff - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x1 - SOL_LOCAL = 0x0 - SOL_SOCKET = 0xffff - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x2 - SO_BROADCAST = 0x20 - SO_DEBUG = 0x1 - SO_DONTROUTE = 0x10 - SO_DONTTRUNC = 0x2000 - SO_ERROR = 0x1007 - SO_KEEPALIVE = 0x8 - SO_LABEL = 0x1010 - SO_LINGER = 0x80 - SO_LINGER_SEC = 0x1080 - SO_NETSVC_MARKING_LEVEL = 0x1119 - SO_NET_SERVICE_TYPE = 0x1116 - SO_NKE = 0x1021 - SO_NOADDRERR = 0x1023 - SO_NOSIGPIPE = 0x1022 - SO_NOTIFYCONFLICT = 0x1026 - SO_NP_EXTENSIONS = 0x1083 - SO_NREAD = 0x1020 - SO_NUMRCVPKT = 0x1112 - SO_NWRITE = 0x1024 - SO_OOBINLINE = 0x100 - SO_PEERLABEL = 0x1011 - SO_RANDOMPORT = 0x1082 - SO_RCVBUF = 0x1002 - SO_RCVLOWAT = 0x1004 - SO_RCVTIMEO = 0x1006 - SO_REUSEADDR = 0x4 - SO_REUSEPORT = 0x200 - SO_REUSESHAREUID = 0x1025 - SO_SNDBUF = 0x1001 - SO_SNDLOWAT = 0x1003 - SO_SNDTIMEO = 0x1005 - SO_TIMESTAMP = 0x400 - SO_TIMESTAMP_MONOTONIC = 0x800 - SO_TYPE = 0x1008 - SO_UPCALLCLOSEWAIT = 0x1027 - SO_USELOOPBACK = 0x40 - SO_WANTMORE = 0x4000 - SO_WANTOOBFLAG = 0x8000 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IFWHT = 0xe000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISTXT = 0x200 - S_ISUID = 0x800 - S_ISVTX = 0x200 - 
S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x400 - TAB2 = 0x800 - TAB3 = 0x4 - TABDLY = 0xc04 - TCIFLUSH = 0x1 - TCIOFF = 0x3 - TCIOFLUSH = 0x3 - TCION = 0x4 - TCOFLUSH = 0x2 - TCOOFF = 0x1 - TCOON = 0x2 - TCP_CONNECTIONTIMEOUT = 0x20 - TCP_CONNECTION_INFO = 0x106 - TCP_ENABLE_ECN = 0x104 - TCP_FASTOPEN = 0x105 - TCP_KEEPALIVE = 0x10 - TCP_KEEPCNT = 0x102 - TCP_KEEPINTVL = 0x101 - TCP_MAXHLEN = 0x3c - TCP_MAXOLEN = 0x28 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_SACK = 0x4 - TCP_MAX_WINSHIFT = 0xe - TCP_MINMSS = 0xd8 - TCP_MSS = 0x200 - TCP_NODELAY = 0x1 - TCP_NOOPT = 0x8 - TCP_NOPUSH = 0x4 - TCP_NOTSENT_LOWAT = 0x201 - TCP_RXT_CONNDROPTIME = 0x80 - TCP_RXT_FINDROP = 0x100 - TCP_SENDMOREACKS = 0x103 - TCSAFLUSH = 0x2 - TIOCCBRK = 0x2000747a - TIOCCDTR = 0x20007478 - TIOCCONS = 0x80047462 - TIOCDCDTIMESTAMP = 0x40107458 - TIOCDRAIN = 0x2000745e - TIOCDSIMICROCODE = 0x20007455 - TIOCEXCL = 0x2000740d - TIOCEXT = 0x80047460 - TIOCFLUSH = 0x80047410 - TIOCGDRAINWAIT = 0x40047456 - TIOCGETA = 0x40487413 - TIOCGETD = 0x4004741a - TIOCGPGRP = 0x40047477 - TIOCGWINSZ = 0x40087468 - TIOCIXOFF = 0x20007480 - TIOCIXON = 0x20007481 - TIOCMBIC = 0x8004746b - TIOCMBIS = 0x8004746c - TIOCMGDTRWAIT = 0x4004745a - TIOCMGET = 0x4004746a - TIOCMODG = 0x40047403 - TIOCMODS = 0x80047404 - TIOCMSDTRWAIT = 0x8004745b - TIOCMSET = 0x8004746d - TIOCM_CAR = 0x40 - TIOCM_CD = 0x40 - TIOCM_CTS = 0x20 - TIOCM_DSR = 0x100 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x80 - TIOCM_RNG = 0x80 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x10 - TIOCM_ST = 0x8 - TIOCNOTTY = 0x20007471 - TIOCNXCL = 0x2000740e - TIOCOUTQ = 0x40047473 - TIOCPKT = 0x80047470 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCPTYGNAME = 0x40807453 - TIOCPTYGRANT = 0x20007454 - TIOCPTYUNLK = 0x20007452 - TIOCREMOTE = 0x80047469 - TIOCSBRK = 0x2000747b - TIOCSCONS = 0x20007463 - TIOCSCTTY = 0x20007461 - TIOCSDRAINWAIT = 0x80047457 - TIOCSDTR = 0x20007479 - TIOCSETA = 0x80487414 - TIOCSETAF = 0x80487416 - TIOCSETAW = 0x80487415 - TIOCSETD = 0x8004741b - TIOCSIG = 0x2000745f - TIOCSPGRP = 0x80047476 - TIOCSTART = 0x2000746e - TIOCSTAT = 0x20007465 - TIOCSTI = 0x80017472 - TIOCSTOP = 0x2000746f - TIOCSWINSZ = 0x80087467 - TIOCTIMESTAMP = 0x40107459 - TIOCUCNTL = 0x80047466 - TOSTOP = 0x400000 - VDISCARD = 0xf - VDSUSP = 0xb - VEOF = 0x0 - VEOL = 0x1 - VEOL2 = 0x2 - VERASE = 0x3 - VINTR = 0x8 - VKILL = 0x5 - VLNEXT = 0xe - VMIN = 0x10 - VM_LOADAVG = 0x2 - VM_MACHFACTOR = 0x4 - VM_MAXID = 0x6 - VM_METER = 0x1 - VM_SWAPUSAGE = 0x5 - VQUIT = 0x9 - VREPRINT = 0x6 - VSTART = 0xc - VSTATUS = 0x12 - VSTOP = 0xd - VSUSP = 0xa - VT0 = 0x0 - VT1 = 0x10000 - VTDLY = 0x10000 - VTIME = 0x11 - VWERASE = 0x4 - WCONTINUED = 0x10 - WCOREFLAG = 0x80 - WEXITED = 0x4 - WNOHANG = 0x1 - WNOWAIT = 0x20 - WORDSIZE = 0x40 - WSTOPPED = 0x8 - WUNTRACED = 0x2 - XATTR_CREATE = 0x2 - XATTR_NODEFAULT = 0x10 - XATTR_NOFOLLOW = 0x1 - XATTR_NOSECURITY = 0x8 - XATTR_REPLACE = 0x4 - XATTR_SHOWCOMPRESSION = 0x20 + AF_APPLETALK = 0x10 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1c + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x25 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1e + AF_IPX = 0x17 + AF_ISDN = 0x1c + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + 
AF_LOCAL = 0x1 + AF_MAX = 0x29 + AF_NATM = 0x1f + AF_NDRV = 0x1b + AF_NETBIOS = 0x21 + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PPP = 0x22 + AF_PUP = 0x4 + AF_RESERVED_36 = 0x24 + AF_ROUTE = 0x11 + AF_SIP = 0x18 + AF_SNA = 0xb + AF_SYSTEM = 0x20 + AF_SYS_CONTROL = 0x2 + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_UTUN = 0x26 + AF_VSOCK = 0x28 + ALTWERASE = 0x200 + ATTR_BIT_MAP_COUNT = 0x5 + ATTR_CMN_ACCESSMASK = 0x20000 + ATTR_CMN_ACCTIME = 0x1000 + ATTR_CMN_ADDEDTIME = 0x10000000 + ATTR_CMN_BKUPTIME = 0x2000 + ATTR_CMN_CHGTIME = 0x800 + ATTR_CMN_CRTIME = 0x200 + ATTR_CMN_DATA_PROTECT_FLAGS = 0x40000000 + ATTR_CMN_DEVID = 0x2 + ATTR_CMN_DOCUMENT_ID = 0x100000 + ATTR_CMN_ERROR = 0x20000000 + ATTR_CMN_EXTENDED_SECURITY = 0x400000 + ATTR_CMN_FILEID = 0x2000000 + ATTR_CMN_FLAGS = 0x40000 + ATTR_CMN_FNDRINFO = 0x4000 + ATTR_CMN_FSID = 0x4 + ATTR_CMN_FULLPATH = 0x8000000 + ATTR_CMN_GEN_COUNT = 0x80000 + ATTR_CMN_GRPID = 0x10000 + ATTR_CMN_GRPUUID = 0x1000000 + ATTR_CMN_MODTIME = 0x400 + ATTR_CMN_NAME = 0x1 + ATTR_CMN_NAMEDATTRCOUNT = 0x80000 + ATTR_CMN_NAMEDATTRLIST = 0x100000 + ATTR_CMN_OBJID = 0x20 + ATTR_CMN_OBJPERMANENTID = 0x40 + ATTR_CMN_OBJTAG = 0x10 + ATTR_CMN_OBJTYPE = 0x8 + ATTR_CMN_OWNERID = 0x8000 + ATTR_CMN_PARENTID = 0x4000000 + ATTR_CMN_PAROBJID = 0x80 + ATTR_CMN_RETURNED_ATTRS = 0x80000000 + ATTR_CMN_SCRIPT = 0x100 + ATTR_CMN_SETMASK = 0x51c7ff00 + ATTR_CMN_USERACCESS = 0x200000 + ATTR_CMN_UUID = 0x800000 + ATTR_CMN_VALIDMASK = 0xffffffff + ATTR_CMN_VOLSETMASK = 0x6700 + ATTR_FILE_ALLOCSIZE = 0x4 + ATTR_FILE_CLUMPSIZE = 0x10 + ATTR_FILE_DATAALLOCSIZE = 0x400 + ATTR_FILE_DATAEXTENTS = 0x800 + ATTR_FILE_DATALENGTH = 0x200 + ATTR_FILE_DEVTYPE = 0x20 + ATTR_FILE_FILETYPE = 0x40 + ATTR_FILE_FORKCOUNT = 0x80 + ATTR_FILE_FORKLIST = 0x100 + ATTR_FILE_IOBLOCKSIZE = 0x8 + ATTR_FILE_LINKCOUNT = 0x1 + ATTR_FILE_RSRCALLOCSIZE = 0x2000 + ATTR_FILE_RSRCEXTENTS = 0x4000 + ATTR_FILE_RSRCLENGTH = 0x1000 + ATTR_FILE_SETMASK = 0x20 + ATTR_FILE_TOTALSIZE = 0x2 + ATTR_FILE_VALIDMASK = 0x37ff + ATTR_VOL_ALLOCATIONCLUMP = 0x40 + ATTR_VOL_ATTRIBUTES = 0x40000000 + ATTR_VOL_CAPABILITIES = 0x20000 + ATTR_VOL_DIRCOUNT = 0x400 + ATTR_VOL_ENCODINGSUSED = 0x10000 + ATTR_VOL_FILECOUNT = 0x200 + ATTR_VOL_FSTYPE = 0x1 + ATTR_VOL_INFO = 0x80000000 + ATTR_VOL_IOBLOCKSIZE = 0x80 + ATTR_VOL_MAXOBJCOUNT = 0x800 + ATTR_VOL_MINALLOCATION = 0x20 + ATTR_VOL_MOUNTEDDEVICE = 0x8000 + ATTR_VOL_MOUNTFLAGS = 0x4000 + ATTR_VOL_MOUNTPOINT = 0x1000 + ATTR_VOL_NAME = 0x2000 + ATTR_VOL_OBJCOUNT = 0x100 + ATTR_VOL_QUOTA_SIZE = 0x10000000 + ATTR_VOL_RESERVED_SIZE = 0x20000000 + ATTR_VOL_SETMASK = 0x80002000 + ATTR_VOL_SIGNATURE = 0x2 + ATTR_VOL_SIZE = 0x4 + ATTR_VOL_SPACEAVAIL = 0x10 + ATTR_VOL_SPACEFREE = 0x8 + ATTR_VOL_SPACEUSED = 0x800000 + ATTR_VOL_UUID = 0x40000 + ATTR_VOL_VALIDMASK = 0xf087ffff + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc00c4279 + BIOCGETIF = 0x4020426b + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044272 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSEESENT = 0x40044276 + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDLT = 0x80044278 + BIOCSETF = 0x80104267 
+ BIOCSETFNR = 0x8010427e + BIOCSETIF = 0x8020426c + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044273 + BIOCSRTIMEOUT = 0x8010426d + BIOCSSEESENT = 0x80044277 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x80000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x8000 + BSDLY = 0x8000 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_MONOTONIC = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_MONOTONIC_RAW_APPROX = 0x5 + CLOCK_PROCESS_CPUTIME_ID = 0xc + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x10 + CLOCK_UPTIME_RAW = 0x8 + CLOCK_UPTIME_RAW_APPROX = 0x9 + CLONE_NOFOLLOW = 0x1 + CLONE_NOOWNERCOPY = 0x2 + CR0 = 0x0 + CR1 = 0x1000 + CR2 = 0x2000 + CR3 = 0x3000 + CRDLY = 0x3000 + CREAD = 0x800 + CRTSCTS = 0x30000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTLIOCGINFO = 0xc0644e03 + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CHDLC = 0x68 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DBUS = 0xe7 + DLT_DECT = 0xdd + DLT_DOCSIS = 0x8f + DLT_DVB_CI = 0xeb + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_HHDLC = 0x79 + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NOFCS = 0xe6 + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_IPFILTER = 0x74 + DLT_IPMB = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPNET = 0xe2 + DLT_IPOIB = 0xf2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_ATM_CEMIC = 0xee + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FIBRECHANNEL = 0xea + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + 
DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_SRX_E2E = 0xe9 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_JUNIPER_VS = 0xe8 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_PPP_WITHDIRECTION = 0xa6 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MATCHING_MAX = 0x10a + DLT_MATCHING_MIN = 0x68 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPEG_2_TS = 0xf3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_MUX27010 = 0xec + DLT_NETANALYZER = 0xf0 + DLT_NETANALYZER_TRANSPARENT = 0xf1 + DLT_NFC_LLCP = 0xf5 + DLT_NFLOG = 0xef + DLT_NG40 = 0xf4 + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PPP_WITH_DIRECTION = 0xa6 + DLT_PRISM_HEADER = 0x77 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RIO = 0x7c + DLT_SCCP = 0x8e + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_STANAG_5066_D_PDU = 0xed + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USB_DARWIN = 0x10a + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DLT_WIHART = 0xdf + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EVFILT_AIO = -0x3 + EVFILT_EXCEPT = -0xf + EVFILT_FS = -0x9 + EVFILT_MACHPORT = -0x8 + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0x11 + EVFILT_THREADMARKER = 0x11 + EVFILT_TIMER = -0x7 + EVFILT_USER = -0xa + EVFILT_VM = -0xc + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_DISPATCH2 = 0x180 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG0 = 0x1000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_OOBAND = 0x2000 + EV_POLL = 0x1000 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EV_UDATA_SPECIFIC = 0x100 + EV_VANISHED = 0x200 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FF1 = 0x4000 + FFDLY = 0x4000 + FLUSHO = 0x800000 + FSOPT_ATTR_CMN_EXTENDED = 0x20 + FSOPT_NOFOLLOW = 0x1 + FSOPT_NOINMEMUPDATE = 0x2 + FSOPT_PACK_INVAL_ATTRS = 0x8 + FSOPT_REPORT_FULLSIZE = 0x4 + FSOPT_RETURN_REALDEV = 0x200 + F_ADDFILESIGS = 0x3d + F_ADDFILESIGS_FOR_DYLD_SIM = 0x53 + F_ADDFILESIGS_INFO = 0x67 + F_ADDFILESIGS_RETURN = 0x61 + F_ADDFILESUPPL = 0x68 + F_ADDSIGS = 0x3b + F_ALLOCATEALL = 0x4 + F_ALLOCATECONTIG = 0x2 + F_BARRIERFSYNC = 0x55 + F_CHECK_LV = 0x62 + F_CHKCLEAN = 0x29 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x43 + F_FINDSIGS = 0x4e + F_FLUSH_DATA = 0x28 + F_FREEZE_FS = 0x35 + F_FULLFSYNC = 0x33 + 
F_GETCODEDIR = 0x48 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETLKPID = 0x42 + F_GETNOSIGPIPE = 0x4a + F_GETOWN = 0x5 + F_GETPATH = 0x32 + F_GETPATH_MTMINFO = 0x47 + F_GETPATH_NOFIRMLINK = 0x66 + F_GETPROTECTIONCLASS = 0x3f + F_GETPROTECTIONLEVEL = 0x4d + F_GETSIGSINFO = 0x69 + F_GLOBAL_NOCACHE = 0x37 + F_LOG2PHYS = 0x31 + F_LOG2PHYS_EXT = 0x41 + F_NOCACHE = 0x30 + F_NODIRECT = 0x3e + F_OK = 0x0 + F_PATHPKG_CHECK = 0x34 + F_PEOFPOSMODE = 0x3 + F_PREALLOCATE = 0x2a + F_PUNCHHOLE = 0x63 + F_RDADVISE = 0x2c + F_RDAHEAD = 0x2d + F_RDLCK = 0x1 + F_SETBACKINGSTORE = 0x46 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETLKWTIMEOUT = 0xa + F_SETNOSIGPIPE = 0x49 + F_SETOWN = 0x6 + F_SETPROTECTIONCLASS = 0x40 + F_SETSIZE = 0x2b + F_SINGLE_WRITER = 0x4c + F_SPECULATIVE_READ = 0x65 + F_THAW_FS = 0x36 + F_TRANSCODEKEY = 0x4b + F_TRIM_ACTIVE_FILE = 0x64 + F_UNLCK = 0x2 + F_VOLPOSMODE = 0x4 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFF_ALLMULTI = 0x200 + IFF_ALTPHYS = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NOTRAILERS = 0x20 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_6LOWPAN = 0x40 + IFT_AAL5 = 0x31 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ATM = 0x25 + IFT_BRIDGE = 0xd1 + IFT_CARP = 0xf8 + IFT_CELLULAR = 0xff + IFT_CEPT = 0x13 + IFT_DS3 = 0x1e + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_ETHER = 0x6 + IFT_FAITH = 0x38 + IFT_FDDI = 0xf + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_GIF = 0x37 + IFT_HDH1822 = 0x3 + IFT_HIPPI = 0x2f + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IEEE1394 = 0x90 + IFT_IEEE8023ADLAG = 0x88 + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88026 = 0xa + IFT_L2VLAN = 0x87 + IFT_LAPB = 0x10 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_NSIP = 0x1b + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PDP = 0xff + IFT_PFLOG = 0xf5 + IFT_PFSYNC = 0xf6 + IFT_PKTAP = 0xfe + IFT_PPP = 0x17 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PTPSERIAL = 0x16 + IFT_RS232 = 0x21 + IFT_SDLC = 0x11 + IFT_SIP = 0x1f + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_STARLAN = 0xb + IFT_STF = 0x39 + IFT_T1 = 0x12 + IFT_ULTRA = 0x1d + IFT_V35 = 0x2d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LINKLOCALNETNUM = 0xa9fe0000 + IN_LOOPBACKNET = 0x7f + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x400473d1 + IPPROTO_3PC = 0x22 + IPPROTO_ADFS = 0x44 + IPPROTO_AH = 0x33 + IPPROTO_AHIP = 0x3d + IPPROTO_APES = 0x63 + IPPROTO_ARGUS = 0xd + IPPROTO_AX25 = 0x5d + IPPROTO_BHA = 0x31 + IPPROTO_BLT = 0x1e + 
IPPROTO_BRSATMON = 0x4c + IPPROTO_CFTP = 0x3e + IPPROTO_CHAOS = 0x10 + IPPROTO_CMTP = 0x26 + IPPROTO_CPHB = 0x49 + IPPROTO_CPNX = 0x48 + IPPROTO_DDP = 0x25 + IPPROTO_DGP = 0x56 + IPPROTO_DIVERT = 0xfe + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EMCON = 0xe + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GMTP = 0x64 + IPPROTO_GRE = 0x2f + IPPROTO_HELLO = 0x3f + IPPROTO_HMP = 0x14 + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IDPR = 0x23 + IPPROTO_IDRP = 0x2d + IPPROTO_IGMP = 0x2 + IPPROTO_IGP = 0x55 + IPPROTO_IGRP = 0x58 + IPPROTO_IL = 0x28 + IPPROTO_INLSP = 0x34 + IPPROTO_INP = 0x20 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPCV = 0x47 + IPPROTO_IPEIP = 0x5e + IPPROTO_IPIP = 0x4 + IPPROTO_IPPC = 0x43 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IRTP = 0x1c + IPPROTO_KRYPTOLAN = 0x41 + IPPROTO_LARP = 0x5b + IPPROTO_LEAF1 = 0x19 + IPPROTO_LEAF2 = 0x1a + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x34 + IPPROTO_MEAS = 0x13 + IPPROTO_MHRP = 0x30 + IPPROTO_MICP = 0x5f + IPPROTO_MTP = 0x5c + IPPROTO_MUX = 0x12 + IPPROTO_ND = 0x4d + IPPROTO_NHRP = 0x36 + IPPROTO_NONE = 0x3b + IPPROTO_NSP = 0x1f + IPPROTO_NVPII = 0xb + IPPROTO_OSPFIGP = 0x59 + IPPROTO_PGM = 0x71 + IPPROTO_PIGP = 0x9 + IPPROTO_PIM = 0x67 + IPPROTO_PRM = 0x15 + IPPROTO_PUP = 0xc + IPPROTO_PVP = 0x4b + IPPROTO_RAW = 0xff + IPPROTO_RCCMON = 0xa + IPPROTO_RDP = 0x1b + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_RVD = 0x42 + IPPROTO_SATEXPAK = 0x40 + IPPROTO_SATMON = 0x45 + IPPROTO_SCCSP = 0x60 + IPPROTO_SCTP = 0x84 + IPPROTO_SDRP = 0x2a + IPPROTO_SEP = 0x21 + IPPROTO_SRPC = 0x5a + IPPROTO_ST = 0x7 + IPPROTO_SVMTP = 0x52 + IPPROTO_SWIPE = 0x35 + IPPROTO_TCF = 0x57 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_TPXX = 0x27 + IPPROTO_TRUNK1 = 0x17 + IPPROTO_TRUNK2 = 0x18 + IPPROTO_TTP = 0x54 + IPPROTO_UDP = 0x11 + IPPROTO_VINES = 0x53 + IPPROTO_VISA = 0x46 + IPPROTO_VMTP = 0x51 + IPPROTO_WBEXPAK = 0x4f + IPPROTO_WBMON = 0x4e + IPPROTO_WSN = 0x4a + IPPROTO_XNET = 0xf + IPPROTO_XTP = 0x24 + IPV6_2292DSTOPTS = 0x17 + IPV6_2292HOPLIMIT = 0x14 + IPV6_2292HOPOPTS = 0x16 + IPV6_2292NEXTHOP = 0x15 + IPV6_2292PKTINFO = 0x13 + IPV6_2292PKTOPTIONS = 0x19 + IPV6_2292RTHDR = 0x18 + IPV6_3542DSTOPTS = 0x32 + IPV6_3542HOPLIMIT = 0x2f + IPV6_3542HOPOPTS = 0x31 + IPV6_3542NEXTHOP = 0x30 + IPV6_3542PKTINFO = 0x2e + IPV6_3542RTHDR = 0x33 + IPV6_ADDR_MC_FLAGS_PREFIX = 0x20 + IPV6_ADDR_MC_FLAGS_TRANSIENT = 0x10 + IPV6_ADDR_MC_FLAGS_UNICAST_BASED = 0x30 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_BINDV6ONLY = 0x1b + IPV6_BOUND_IF = 0x7d + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FLOW_ECN_MASK = 0x3000 + IPV6_FRAGTTL = 0x3c + IPV6_FW_ADD = 0x1e + IPV6_FW_DEL = 0x1f + IPV6_FW_FLUSH = 0x20 + IPV6_FW_GET = 0x22 + IPV6_FW_ZERO = 0x21 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXOPTHDR = 0x800 + IPV6_MAXPACKET = 0xffff + IPV6_MAX_GROUP_SRC_FILTER = 0x200 + IPV6_MAX_MEMBERSHIPS = 0xfff + IPV6_MAX_SOCK_SRC_FILTER = 0x80 + IPV6_MIN_MEMBERSHIPS = 0x1f + IPV6_MMTU = 0x500 + IPV6_MSFILTER = 0x4a + IPV6_MULTICAST_HOPS = 0xa + 
IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_PREFER_TEMPADDR = 0x3f + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x3d + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x23 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x39 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x24 + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_ADD_SOURCE_MEMBERSHIP = 0x46 + IP_BLOCK_SOURCE = 0x48 + IP_BOUND_IF = 0x19 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DONTFRAG = 0x1c + IP_DROP_MEMBERSHIP = 0xd + IP_DROP_SOURCE_MEMBERSHIP = 0x47 + IP_DUMMYNET_CONFIGURE = 0x3c + IP_DUMMYNET_DEL = 0x3d + IP_DUMMYNET_FLUSH = 0x3e + IP_DUMMYNET_GET = 0x40 + IP_FAITH = 0x16 + IP_FW_ADD = 0x28 + IP_FW_DEL = 0x29 + IP_FW_FLUSH = 0x2a + IP_FW_GET = 0x2c + IP_FW_RESETLOG = 0x2d + IP_FW_ZERO = 0x2b + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x15 + IP_MAXPACKET = 0xffff + IP_MAX_GROUP_SRC_FILTER = 0x200 + IP_MAX_MEMBERSHIPS = 0xfff + IP_MAX_SOCK_MUTE_FILTER = 0x80 + IP_MAX_SOCK_SRC_FILTER = 0x80 + IP_MF = 0x2000 + IP_MIN_MEMBERSHIPS = 0x1f + IP_MSFILTER = 0x4a + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_IFINDEX = 0x42 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_VIF = 0xe + IP_NAT__XXX = 0x37 + IP_OFFMASK = 0x1fff + IP_OLD_FW_ADD = 0x32 + IP_OLD_FW_DEL = 0x33 + IP_OLD_FW_FLUSH = 0x34 + IP_OLD_FW_GET = 0x36 + IP_OLD_FW_RESETLOG = 0x38 + IP_OLD_FW_ZERO = 0x35 + IP_OPTIONS = 0x1 + IP_PKTINFO = 0x1a + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVPKTINFO = 0x1a + IP_RECVRETOPTS = 0x6 + IP_RECVTOS = 0x1b + IP_RECVTTL = 0x18 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RSVP_OFF = 0x10 + IP_RSVP_ON = 0xf + IP_RSVP_VIF_OFF = 0x12 + IP_RSVP_VIF_ON = 0x11 + IP_STRIPHDR = 0x17 + IP_TOS = 0x3 + IP_TRAFFIC_MGT_BACKGROUND = 0x41 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x49 + ISIG = 0x80 + ISTRIP = 0x20 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LOCAL_PEERCRED = 0x1 + LOCAL_PEEREPID = 0x3 + LOCAL_PEEREUUID = 0x5 + LOCAL_PEERPID = 0x2 + LOCAL_PEERTOKEN = 0x6 + LOCAL_PEERUUID = 0x4 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_CAN_REUSE = 0x9 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_FREE_REUSABLE = 0x7 + MADV_FREE_REUSE = 0x8 + MADV_NORMAL = 0x0 + MADV_PAGEOUT = 0xa + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_WILLNEED = 0x3 + MADV_ZERO_WIRED_PAGES = 0x6 + MAP_32BIT = 0x8000 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_COPY = 0x2 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_HASSEMAPHORE = 0x200 + MAP_JIT = 0x800 + MAP_NOCACHE = 0x400 + MAP_NOEXTEND = 0x100 + MAP_NORESERVE = 0x40 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_RESERVED0080 = 0x80 + MAP_RESILIENT_CODESIGN = 0x2000 + MAP_RESILIENT_MEDIA = 0x4000 + MAP_SHARED = 0x1 + MAP_TRANSLATED_ALLOW_EXECUTE = 0x20000 + MAP_UNIX03 = 0x40000 + MCAST_BLOCK_SOURCE = 0x54 + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 
0x50 + MCAST_JOIN_SOURCE_GROUP = 0x52 + MCAST_LEAVE_GROUP = 0x51 + MCAST_LEAVE_SOURCE_GROUP = 0x53 + MCAST_UNBLOCK_SOURCE = 0x55 + MCAST_UNDEFINED = 0x0 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MNT_ASYNC = 0x40 + MNT_AUTOMOUNTED = 0x400000 + MNT_CMDFLAGS = 0xf0000 + MNT_CPROTECT = 0x80 + MNT_DEFWRITE = 0x2000000 + MNT_DONTBROWSE = 0x100000 + MNT_DOVOLFS = 0x8000 + MNT_DWAIT = 0x4 + MNT_EXPORTED = 0x100 + MNT_EXT_ROOT_DATA_VOL = 0x1 + MNT_FORCE = 0x80000 + MNT_IGNORE_OWNERSHIP = 0x200000 + MNT_JOURNALED = 0x800000 + MNT_LOCAL = 0x1000 + MNT_MULTILABEL = 0x4000000 + MNT_NOATIME = 0x10000000 + MNT_NOBLOCK = 0x20000 + MNT_NODEV = 0x10 + MNT_NOEXEC = 0x4 + MNT_NOSUID = 0x8 + MNT_NOUSERXATTR = 0x1000000 + MNT_NOWAIT = 0x2 + MNT_QUARANTINE = 0x400 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_REMOVABLE = 0x200 + MNT_ROOTFS = 0x4000 + MNT_SNAPSHOT = 0x40000000 + MNT_STRICTATIME = 0x80000000 + MNT_SYNCHRONOUS = 0x2 + MNT_UNION = 0x20 + MNT_UNKNOWNPERMISSIONS = 0x200000 + MNT_UPDATE = 0x10000 + MNT_VISFLAGMASK = 0xd7f0f7ff + MNT_WAIT = 0x1 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOF = 0x100 + MSG_EOR = 0x8 + MSG_FLUSH = 0x400 + MSG_HAVEMORE = 0x2000 + MSG_HOLD = 0x800 + MSG_NEEDSA = 0x10000 + MSG_NOSIGNAL = 0x80000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_RCVMORE = 0x4000 + MSG_SEND = 0x1000 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MSG_WAITSTREAM = 0x200 + MS_ASYNC = 0x1 + MS_DEACTIVATE = 0x8 + MS_INVALIDATE = 0x2 + MS_KILLPAGES = 0x4 + MS_SYNC = 0x10 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_DUMP2 = 0x7 + NET_RT_FLAGS = 0x2 + NET_RT_FLAGS_PRIV = 0xa + NET_RT_IFLIST = 0x3 + NET_RT_IFLIST2 = 0x6 + NET_RT_MAXID = 0xb + NET_RT_STAT = 0x4 + NET_RT_TRASH = 0x5 + NFDBITS = 0x20 + NL0 = 0x0 + NL1 = 0x100 + NL2 = 0x200 + NL3 = 0x300 + NLDLY = 0x300 + NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 + NOTE_ABSOLUTE = 0x8 + NOTE_ATTRIB = 0x8 + NOTE_BACKGROUND = 0x40 + NOTE_CHILD = 0x4 + NOTE_CRITICAL = 0x20 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXITSTATUS = 0x4000000 + NOTE_EXIT_CSERROR = 0x40000 + NOTE_EXIT_DECRYPTFAIL = 0x10000 + NOTE_EXIT_DETAIL = 0x2000000 + NOTE_EXIT_DETAIL_MASK = 0x70000 + NOTE_EXIT_MEMORY = 0x20000 + NOTE_EXIT_REPARENTED = 0x80000 + NOTE_EXTEND = 0x4 + NOTE_FFAND = 0x40000000 + NOTE_FFCOPY = 0xc0000000 + NOTE_FFCTRLMASK = 0xc0000000 + NOTE_FFLAGSMASK = 0xffffff + NOTE_FFNOP = 0x0 + NOTE_FFOR = 0x80000000 + NOTE_FORK = 0x40000000 + NOTE_FUNLOCK = 0x100 + NOTE_LEEWAY = 0x10 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_MACHTIME = 0x100 + NOTE_MACH_CONTINUOUS_TIME = 0x80 + NOTE_NONE = 0x80 + NOTE_NSECONDS = 0x4 + NOTE_OOB = 0x2 + NOTE_PCTRLMASK = -0x100000 + NOTE_PDATAMASK = 0xfffff + NOTE_REAP = 0x10000000 + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_SECONDS = 0x1 + NOTE_SIGNAL = 0x8000000 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRIGGER = 0x1000000 + NOTE_USECONDS = 0x2 + NOTE_VM_ERROR = 0x10000000 + NOTE_VM_PRESSURE = 0x80000000 + NOTE_VM_PRESSURE_SUDDEN_TERMINATE = 0x20000000 + NOTE_VM_PRESSURE_TERMINATE = 0x40000000 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + OFDEL = 0x20000 + OFILL = 0x80 + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + OXTABS = 0x4 + O_ACCMODE = 0x3 + O_ALERT = 0x20000000 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x1000000 + O_CREAT = 0x200 + O_DIRECTORY = 0x100000 + O_DP_GETRAWENCRYPTED = 0x1 + O_DP_GETRAWUNENCRYPTED = 0x2 + O_DSYNC = 0x400000 + O_EVTONLY = 0x8000 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + 
O_NOCTTY = 0x20000 + O_NOFOLLOW = 0x100 + O_NOFOLLOW_ANY = 0x20000000 + O_NONBLOCK = 0x4 + O_POPUP = 0x80000000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_SHLOCK = 0x10 + O_SYMLINK = 0x200000 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PT_ATTACH = 0xa + PT_ATTACHEXC = 0xe + PT_CONTINUE = 0x7 + PT_DENY_ATTACH = 0x1f + PT_DETACH = 0xb + PT_FIRSTMACH = 0x20 + PT_FORCEQUOTA = 0x1e + PT_KILL = 0x8 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_READ_U = 0x3 + PT_SIGEXC = 0xc + PT_STEP = 0x9 + PT_THUPDATE = 0xd + PT_TRACE_ME = 0x0 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + PT_WRITE_U = 0x6 + RLIMIT_AS = 0x5 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_CPU_USAGE_MONITOR = 0x2 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 + RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x8 + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CLONING = 0x100 + RTF_CONDEMNED = 0x2000000 + RTF_DEAD = 0x20000000 + RTF_DELCLONE = 0x80 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_GATEWAY = 0x2 + RTF_GLOBAL = 0x40000000 + RTF_HOST = 0x4 + RTF_IFREF = 0x4000000 + RTF_IFSCOPE = 0x1000000 + RTF_LLDATA = 0x400 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MULTICAST = 0x800000 + RTF_NOIFREF = 0x2000 + RTF_PINNED = 0x100000 + RTF_PRCLONING = 0x10000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_PROXY = 0x8000000 + RTF_REJECT = 0x8 + RTF_ROUTER = 0x10000000 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_WASCLONED = 0x20000 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DELMADDR = 0x10 + RTM_GET = 0x4 + RTM_GET2 = 0x14 + RTM_IFINFO = 0xe + RTM_IFINFO2 = 0x12 + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_NEWMADDR = 0xf + RTM_NEWMADDR2 = 0x13 + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + SCM_CREDS = 0x3 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x2 + SCM_TIMESTAMP_MONOTONIC = 0x4 + SEEK_CUR = 0x1 + SEEK_DATA = 0x4 + SEEK_END = 0x2 + SEEK_HOLE = 0x3 + SEEK_SET = 0x0 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCARPIPLL = 0xc0206928 + SIOCATMARK = 0x40047307 + SIOCAUTOADDR = 0xc0206926 + SIOCAUTONETMASK = 0x80206927 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFPHYADDR = 0x80206941 + SIOCGDRVSPEC = 0xc028697b + SIOCGETVLAN = 0xc020697f + SIOCGHIWAT = 0x40047301 + SIOCGIF6LOWPAN = 0xc02069c5 + SIOCGIFADDR = 0xc0206921 + SIOCGIFALTMTU = 0xc0206948 + SIOCGIFASYNCMAP = 0xc020697c + SIOCGIFBOND = 0xc0206947 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCAP = 0xc020695b + SIOCGIFCONF = 0xc00c6924 + SIOCGIFDEVMTU = 0xc0206944 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 
0xc0206911 + SIOCGIFFUNCTIONALTYPE = 0xc02069ad + SIOCGIFGENERIC = 0xc020693a + SIOCGIFKPI = 0xc0206987 + SIOCGIFMAC = 0xc0206982 + SIOCGIFMEDIA = 0xc02c6938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc0206933 + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206940 + SIOCGIFPHYS = 0xc0206935 + SIOCGIFPSRCADDR = 0xc020693f + SIOCGIFSTATUS = 0xc331693d + SIOCGIFVLAN = 0xc020697f + SIOCGIFWAKEFLAGS = 0xc0206988 + SIOCGIFXMEDIA = 0xc02c6948 + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCIFCREATE = 0xc0206978 + SIOCIFCREATE2 = 0xc020697a + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106981 + SIOCRSLVMULTI = 0xc010693b + SIOCSDRVSPEC = 0x8028697b + SIOCSETVLAN = 0x8020697e + SIOCSHIWAT = 0x80047300 + SIOCSIF6LOWPAN = 0x802069c4 + SIOCSIFADDR = 0x8020690c + SIOCSIFALTMTU = 0x80206945 + SIOCSIFASYNCMAP = 0x8020697d + SIOCSIFBOND = 0x80206946 + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFCAP = 0x8020695a + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGENERIC = 0x80206939 + SIOCSIFKPI = 0x80206986 + SIOCSIFLLADDR = 0x8020693c + SIOCSIFMAC = 0x80206983 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x80206934 + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x8040693e + SIOCSIFPHYS = 0x80206936 + SIOCSIFVLAN = 0x8020697e + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SOCK_DGRAM = 0x2 + SOCK_MAXADDRLEN = 0xff + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_LOCAL = 0x0 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_DONTTRUNC = 0x2000 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LABEL = 0x1010 + SO_LINGER = 0x80 + SO_LINGER_SEC = 0x1080 + SO_NETSVC_MARKING_LEVEL = 0x1119 + SO_NET_SERVICE_TYPE = 0x1116 + SO_NKE = 0x1021 + SO_NOADDRERR = 0x1023 + SO_NOSIGPIPE = 0x1022 + SO_NOTIFYCONFLICT = 0x1026 + SO_NP_EXTENSIONS = 0x1083 + SO_NREAD = 0x1020 + SO_NUMRCVPKT = 0x1112 + SO_NWRITE = 0x1024 + SO_OOBINLINE = 0x100 + SO_PEERLABEL = 0x1011 + SO_RANDOMPORT = 0x1082 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_REUSESHAREUID = 0x1025 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMP = 0x400 + SO_TIMESTAMP_MONOTONIC = 0x800 + SO_TRACKER_ATTRIBUTE_FLAGS_APP_APPROVED = 0x1 + SO_TRACKER_ATTRIBUTE_FLAGS_DOMAIN_SHORT = 0x4 + SO_TRACKER_ATTRIBUTE_FLAGS_TRACKER = 0x2 + SO_TRACKER_TRANSPARENCY_VERSION = 0x3 + SO_TYPE = 0x1008 + SO_UPCALLCLOSEWAIT = 0x1027 + SO_USELOOPBACK = 0x40 + SO_WANTMORE = 0x4000 + SO_WANTOOBFLAG = 0x8000 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IFWHT = 0xe000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB1 = 0x400 + TAB2 = 0x800 + TAB3 = 0x4 + TABDLY = 0xc04 + TCIFLUSH = 0x1 + TCIOFF = 0x3 + TCIOFLUSH = 0x3 + TCION = 0x4 + TCOFLUSH = 0x2 + TCOOFF = 0x1 + TCOON = 0x2 + TCPOPT_CC = 0xb + TCPOPT_CCECHO = 0xd + TCPOPT_CCNEW = 0xc + TCPOPT_EOL = 0x0 + TCPOPT_FASTOPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR 
= 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 + TCP_CONNECTIONTIMEOUT = 0x20 + TCP_CONNECTION_INFO = 0x106 + TCP_ENABLE_ECN = 0x104 + TCP_FASTOPEN = 0x105 + TCP_KEEPALIVE = 0x10 + TCP_KEEPCNT = 0x102 + TCP_KEEPINTVL = 0x101 + TCP_MAXHLEN = 0x3c + TCP_MAXOLEN = 0x28 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x4 + TCP_MAX_WINSHIFT = 0xe + TCP_MINMSS = 0xd8 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOOPT = 0x8 + TCP_NOPUSH = 0x4 + TCP_NOTSENT_LOWAT = 0x201 + TCP_RXT_CONNDROPTIME = 0x80 + TCP_RXT_FINDROP = 0x100 + TCP_SENDMOREACKS = 0x103 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDCDTIMESTAMP = 0x40107458 + TIOCDRAIN = 0x2000745e + TIOCDSIMICROCODE = 0x20007455 + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLUSH = 0x80047410 + TIOCGDRAINWAIT = 0x40047456 + TIOCGETA = 0x40487413 + TIOCGETD = 0x4004741a + TIOCGPGRP = 0x40047477 + TIOCGWINSZ = 0x40087468 + TIOCIXOFF = 0x20007480 + TIOCIXON = 0x20007481 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGDTRWAIT = 0x4004745a + TIOCMGET = 0x4004746a + TIOCMODG = 0x40047403 + TIOCMODS = 0x80047404 + TIOCMSDTRWAIT = 0x8004745b + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTYGNAME = 0x40807453 + TIOCPTYGRANT = 0x20007454 + TIOCPTYUNLK = 0x20007452 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCONS = 0x20007463 + TIOCSCTTY = 0x20007461 + TIOCSDRAINWAIT = 0x80047457 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x80487414 + TIOCSETAF = 0x80487416 + TIOCSETAW = 0x80487415 + TIOCSETD = 0x8004741b + TIOCSIG = 0x2000745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCTIMESTAMP = 0x40107459 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMADDR_CID_ANY = 0xffffffff + VMADDR_CID_HOST = 0x2 + VMADDR_CID_HYPERVISOR = 0x0 + VMADDR_CID_RESERVED = 0x1 + VMADDR_PORT_ANY = 0xffffffff + VMIN = 0x10 + VM_LOADAVG = 0x2 + VM_MACHFACTOR = 0x4 + VM_MAXID = 0x6 + VM_METER = 0x1 + VM_SWAPUSAGE = 0x5 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VT0 = 0x0 + VT1 = 0x10000 + VTDLY = 0x10000 + VTIME = 0x11 + VWERASE = 0x4 + WCONTINUED = 0x10 + WCOREFLAG = 0x80 + WEXITED = 0x4 + WNOHANG = 0x1 + WNOWAIT = 0x20 + WORDSIZE = 0x40 + WSTOPPED = 0x8 + WUNTRACED = 0x2 + XATTR_CREATE = 0x2 + XATTR_NODEFAULT = 0x10 + XATTR_NOFOLLOW = 0x1 + XATTR_NOSECURITY = 0x8 + XATTR_REPLACE = 0x4 + XATTR_SHOWCOMPRESSION = 0x20 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index 31009d7f05..e36f5178d6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -12,1556 +12,1582 @@ package unix import "syscall" const ( - AF_APPLETALK = 0x10 - AF_CCITT = 0xa - 
AF_CHAOS = 0x5 - AF_CNT = 0x15 - AF_COIP = 0x14 - AF_DATAKIT = 0x9 - AF_DECnet = 0xc - AF_DLI = 0xd - AF_E164 = 0x1c - AF_ECMA = 0x8 - AF_HYLINK = 0xf - AF_IEEE80211 = 0x25 - AF_IMPLINK = 0x3 - AF_INET = 0x2 - AF_INET6 = 0x1e - AF_IPX = 0x17 - AF_ISDN = 0x1c - AF_ISO = 0x7 - AF_LAT = 0xe - AF_LINK = 0x12 - AF_LOCAL = 0x1 - AF_MAX = 0x29 - AF_NATM = 0x1f - AF_NDRV = 0x1b - AF_NETBIOS = 0x21 - AF_NS = 0x6 - AF_OSI = 0x7 - AF_PPP = 0x22 - AF_PUP = 0x4 - AF_RESERVED_36 = 0x24 - AF_ROUTE = 0x11 - AF_SIP = 0x18 - AF_SNA = 0xb - AF_SYSTEM = 0x20 - AF_SYS_CONTROL = 0x2 - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_UTUN = 0x26 - AF_VSOCK = 0x28 - ALTWERASE = 0x200 - ATTR_BIT_MAP_COUNT = 0x5 - ATTR_CMN_ACCESSMASK = 0x20000 - ATTR_CMN_ACCTIME = 0x1000 - ATTR_CMN_ADDEDTIME = 0x10000000 - ATTR_CMN_BKUPTIME = 0x2000 - ATTR_CMN_CHGTIME = 0x800 - ATTR_CMN_CRTIME = 0x200 - ATTR_CMN_DATA_PROTECT_FLAGS = 0x40000000 - ATTR_CMN_DEVID = 0x2 - ATTR_CMN_DOCUMENT_ID = 0x100000 - ATTR_CMN_ERROR = 0x20000000 - ATTR_CMN_EXTENDED_SECURITY = 0x400000 - ATTR_CMN_FILEID = 0x2000000 - ATTR_CMN_FLAGS = 0x40000 - ATTR_CMN_FNDRINFO = 0x4000 - ATTR_CMN_FSID = 0x4 - ATTR_CMN_FULLPATH = 0x8000000 - ATTR_CMN_GEN_COUNT = 0x80000 - ATTR_CMN_GRPID = 0x10000 - ATTR_CMN_GRPUUID = 0x1000000 - ATTR_CMN_MODTIME = 0x400 - ATTR_CMN_NAME = 0x1 - ATTR_CMN_NAMEDATTRCOUNT = 0x80000 - ATTR_CMN_NAMEDATTRLIST = 0x100000 - ATTR_CMN_OBJID = 0x20 - ATTR_CMN_OBJPERMANENTID = 0x40 - ATTR_CMN_OBJTAG = 0x10 - ATTR_CMN_OBJTYPE = 0x8 - ATTR_CMN_OWNERID = 0x8000 - ATTR_CMN_PARENTID = 0x4000000 - ATTR_CMN_PAROBJID = 0x80 - ATTR_CMN_RETURNED_ATTRS = 0x80000000 - ATTR_CMN_SCRIPT = 0x100 - ATTR_CMN_SETMASK = 0x51c7ff00 - ATTR_CMN_USERACCESS = 0x200000 - ATTR_CMN_UUID = 0x800000 - ATTR_CMN_VALIDMASK = 0xffffffff - ATTR_CMN_VOLSETMASK = 0x6700 - ATTR_FILE_ALLOCSIZE = 0x4 - ATTR_FILE_CLUMPSIZE = 0x10 - ATTR_FILE_DATAALLOCSIZE = 0x400 - ATTR_FILE_DATAEXTENTS = 0x800 - ATTR_FILE_DATALENGTH = 0x200 - ATTR_FILE_DEVTYPE = 0x20 - ATTR_FILE_FILETYPE = 0x40 - ATTR_FILE_FORKCOUNT = 0x80 - ATTR_FILE_FORKLIST = 0x100 - ATTR_FILE_IOBLOCKSIZE = 0x8 - ATTR_FILE_LINKCOUNT = 0x1 - ATTR_FILE_RSRCALLOCSIZE = 0x2000 - ATTR_FILE_RSRCEXTENTS = 0x4000 - ATTR_FILE_RSRCLENGTH = 0x1000 - ATTR_FILE_SETMASK = 0x20 - ATTR_FILE_TOTALSIZE = 0x2 - ATTR_FILE_VALIDMASK = 0x37ff - ATTR_VOL_ALLOCATIONCLUMP = 0x40 - ATTR_VOL_ATTRIBUTES = 0x40000000 - ATTR_VOL_CAPABILITIES = 0x20000 - ATTR_VOL_DIRCOUNT = 0x400 - ATTR_VOL_ENCODINGSUSED = 0x10000 - ATTR_VOL_FILECOUNT = 0x200 - ATTR_VOL_FSTYPE = 0x1 - ATTR_VOL_INFO = 0x80000000 - ATTR_VOL_IOBLOCKSIZE = 0x80 - ATTR_VOL_MAXOBJCOUNT = 0x800 - ATTR_VOL_MINALLOCATION = 0x20 - ATTR_VOL_MOUNTEDDEVICE = 0x8000 - ATTR_VOL_MOUNTFLAGS = 0x4000 - ATTR_VOL_MOUNTPOINT = 0x1000 - ATTR_VOL_NAME = 0x2000 - ATTR_VOL_OBJCOUNT = 0x100 - ATTR_VOL_QUOTA_SIZE = 0x10000000 - ATTR_VOL_RESERVED_SIZE = 0x20000000 - ATTR_VOL_SETMASK = 0x80002000 - ATTR_VOL_SIGNATURE = 0x2 - ATTR_VOL_SIZE = 0x4 - ATTR_VOL_SPACEAVAIL = 0x10 - ATTR_VOL_SPACEFREE = 0x8 - ATTR_VOL_UUID = 0x40000 - ATTR_VOL_VALIDMASK = 0xf007ffff - B0 = 0x0 - B110 = 0x6e - B115200 = 0x1c200 - B1200 = 0x4b0 - B134 = 0x86 - B14400 = 0x3840 - B150 = 0x96 - B1800 = 0x708 - B19200 = 0x4b00 - B200 = 0xc8 - B230400 = 0x38400 - B2400 = 0x960 - B28800 = 0x7080 - B300 = 0x12c - B38400 = 0x9600 - B4800 = 0x12c0 - B50 = 0x32 - B57600 = 0xe100 - B600 = 0x258 - B7200 = 0x1c20 - B75 = 0x4b - B76800 = 0x12c00 - B9600 = 0x2580 - BIOCFLUSH = 0x20004268 - BIOCGBLEN = 0x40044266 - BIOCGDLT = 0x4004426a - BIOCGDLTLIST = 0xc00c4279 - BIOCGETIF = 
0x4020426b - BIOCGHDRCMPLT = 0x40044274 - BIOCGRSIG = 0x40044272 - BIOCGRTIMEOUT = 0x4010426e - BIOCGSEESENT = 0x40044276 - BIOCGSTATS = 0x4008426f - BIOCIMMEDIATE = 0x80044270 - BIOCPROMISC = 0x20004269 - BIOCSBLEN = 0xc0044266 - BIOCSDLT = 0x80044278 - BIOCSETF = 0x80104267 - BIOCSETFNR = 0x8010427e - BIOCSETIF = 0x8020426c - BIOCSHDRCMPLT = 0x80044275 - BIOCSRSIG = 0x80044273 - BIOCSRTIMEOUT = 0x8010426d - BIOCSSEESENT = 0x80044277 - BIOCVERSION = 0x40044271 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ALIGNMENT = 0x4 - BPF_ALU = 0x4 - BPF_AND = 0x50 - BPF_B = 0x10 - BPF_DIV = 0x30 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JMP = 0x5 - BPF_JSET = 0x40 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXBUFSIZE = 0x80000 - BPF_MAXINSNS = 0x200 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINBUFSIZE = 0x20 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_OR = 0x40 - BPF_RELEASE = 0x30bb6 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAX = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x8000 - BSDLY = 0x8000 - CFLUSH = 0xf - CLOCAL = 0x8000 - CLOCK_MONOTONIC = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_MONOTONIC_RAW_APPROX = 0x5 - CLOCK_PROCESS_CPUTIME_ID = 0xc - CLOCK_REALTIME = 0x0 - CLOCK_THREAD_CPUTIME_ID = 0x10 - CLOCK_UPTIME_RAW = 0x8 - CLOCK_UPTIME_RAW_APPROX = 0x9 - CLONE_NOFOLLOW = 0x1 - CLONE_NOOWNERCOPY = 0x2 - CR0 = 0x0 - CR1 = 0x1000 - CR2 = 0x2000 - CR3 = 0x3000 - CRDLY = 0x3000 - CREAD = 0x800 - CRTSCTS = 0x30000 - CS5 = 0x0 - CS6 = 0x100 - CS7 = 0x200 - CS8 = 0x300 - CSIZE = 0x300 - CSTART = 0x11 - CSTATUS = 0x14 - CSTOP = 0x13 - CSTOPB = 0x400 - CSUSP = 0x1a - CTLIOCGINFO = 0xc0644e03 - CTL_HW = 0x6 - CTL_KERN = 0x1 - CTL_MAXNAME = 0xc - CTL_NET = 0x4 - DLT_A429 = 0xb8 - DLT_A653_ICM = 0xb9 - DLT_AIRONET_HEADER = 0x78 - DLT_AOS = 0xde - DLT_APPLE_IP_OVER_IEEE1394 = 0x8a - DLT_ARCNET = 0x7 - DLT_ARCNET_LINUX = 0x81 - DLT_ATM_CLIP = 0x13 - DLT_ATM_RFC1483 = 0xb - DLT_AURORA = 0x7e - DLT_AX25 = 0x3 - DLT_AX25_KISS = 0xca - DLT_BACNET_MS_TP = 0xa5 - DLT_BLUETOOTH_HCI_H4 = 0xbb - DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 - DLT_CAN20B = 0xbe - DLT_CAN_SOCKETCAN = 0xe3 - DLT_CHAOS = 0x5 - DLT_CHDLC = 0x68 - DLT_CISCO_IOS = 0x76 - DLT_C_HDLC = 0x68 - DLT_C_HDLC_WITH_DIR = 0xcd - DLT_DBUS = 0xe7 - DLT_DECT = 0xdd - DLT_DOCSIS = 0x8f - DLT_DVB_CI = 0xeb - DLT_ECONET = 0x73 - DLT_EN10MB = 0x1 - DLT_EN3MB = 0x2 - DLT_ENC = 0x6d - DLT_ERF = 0xc5 - DLT_ERF_ETH = 0xaf - DLT_ERF_POS = 0xb0 - DLT_FC_2 = 0xe0 - DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 - DLT_FDDI = 0xa - DLT_FLEXRAY = 0xd2 - DLT_FRELAY = 0x6b - DLT_FRELAY_WITH_DIR = 0xce - DLT_GCOM_SERIAL = 0xad - DLT_GCOM_T1E1 = 0xac - DLT_GPF_F = 0xab - DLT_GPF_T = 0xaa - DLT_GPRS_LLC = 0xa9 - DLT_GSMTAP_ABIS = 0xda - DLT_GSMTAP_UM = 0xd9 - DLT_HHDLC = 0x79 - DLT_IBM_SN = 0x92 - DLT_IBM_SP = 0x91 - DLT_IEEE802 = 0x6 - DLT_IEEE802_11 = 0x69 - DLT_IEEE802_11_RADIO = 0x7f - DLT_IEEE802_11_RADIO_AVS = 0xa3 - DLT_IEEE802_15_4 = 0xc3 - DLT_IEEE802_15_4_LINUX = 0xbf - DLT_IEEE802_15_4_NOFCS = 0xe6 - DLT_IEEE802_15_4_NONASK_PHY = 0xd7 - DLT_IEEE802_16_MAC_CPS = 0xbc - DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 - DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 - DLT_IPMB_LINUX = 0xd1 - DLT_IPNET = 0xe2 - DLT_IPOIB = 0xf2 - DLT_IPV4 = 0xe4 - DLT_IPV6 = 0xe5 - DLT_IP_OVER_FC = 0x7a - DLT_JUNIPER_ATM1 = 0x89 - 
DLT_JUNIPER_ATM2 = 0x87 - DLT_JUNIPER_ATM_CEMIC = 0xee - DLT_JUNIPER_CHDLC = 0xb5 - DLT_JUNIPER_ES = 0x84 - DLT_JUNIPER_ETHER = 0xb2 - DLT_JUNIPER_FIBRECHANNEL = 0xea - DLT_JUNIPER_FRELAY = 0xb4 - DLT_JUNIPER_GGSN = 0x85 - DLT_JUNIPER_ISM = 0xc2 - DLT_JUNIPER_MFR = 0x86 - DLT_JUNIPER_MLFR = 0x83 - DLT_JUNIPER_MLPPP = 0x82 - DLT_JUNIPER_MONITOR = 0xa4 - DLT_JUNIPER_PIC_PEER = 0xae - DLT_JUNIPER_PPP = 0xb3 - DLT_JUNIPER_PPPOE = 0xa7 - DLT_JUNIPER_PPPOE_ATM = 0xa8 - DLT_JUNIPER_SERVICES = 0x88 - DLT_JUNIPER_SRX_E2E = 0xe9 - DLT_JUNIPER_ST = 0xc8 - DLT_JUNIPER_VP = 0xb7 - DLT_JUNIPER_VS = 0xe8 - DLT_LAPB_WITH_DIR = 0xcf - DLT_LAPD = 0xcb - DLT_LIN = 0xd4 - DLT_LINUX_EVDEV = 0xd8 - DLT_LINUX_IRDA = 0x90 - DLT_LINUX_LAPD = 0xb1 - DLT_LINUX_PPP_WITHDIRECTION = 0xa6 - DLT_LINUX_SLL = 0x71 - DLT_LOOP = 0x6c - DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x10a - DLT_MATCHING_MIN = 0x68 - DLT_MFR = 0xb6 - DLT_MOST = 0xd3 - DLT_MPEG_2_TS = 0xf3 - DLT_MPLS = 0xdb - DLT_MTP2 = 0x8c - DLT_MTP2_WITH_PHDR = 0x8b - DLT_MTP3 = 0x8d - DLT_MUX27010 = 0xec - DLT_NETANALYZER = 0xf0 - DLT_NETANALYZER_TRANSPARENT = 0xf1 - DLT_NFC_LLCP = 0xf5 - DLT_NFLOG = 0xef - DLT_NG40 = 0xf4 - DLT_NULL = 0x0 - DLT_PCI_EXP = 0x7d - DLT_PFLOG = 0x75 - DLT_PFSYNC = 0x12 - DLT_PPI = 0xc0 - DLT_PPP = 0x9 - DLT_PPP_BSDOS = 0x10 - DLT_PPP_ETHER = 0x33 - DLT_PPP_PPPD = 0xa6 - DLT_PPP_SERIAL = 0x32 - DLT_PPP_WITH_DIR = 0xcc - DLT_PPP_WITH_DIRECTION = 0xa6 - DLT_PRISM_HEADER = 0x77 - DLT_PRONET = 0x4 - DLT_RAIF1 = 0xc6 - DLT_RAW = 0xc - DLT_RIO = 0x7c - DLT_SCCP = 0x8e - DLT_SITA = 0xc4 - DLT_SLIP = 0x8 - DLT_SLIP_BSDOS = 0xf - DLT_STANAG_5066_D_PDU = 0xed - DLT_SUNATM = 0x7b - DLT_SYMANTEC_FIREWALL = 0x63 - DLT_TZSP = 0x80 - DLT_USB = 0xba - DLT_USB_DARWIN = 0x10a - DLT_USB_LINUX = 0xbd - DLT_USB_LINUX_MMAPPED = 0xdc - DLT_USER0 = 0x93 - DLT_USER1 = 0x94 - DLT_USER10 = 0x9d - DLT_USER11 = 0x9e - DLT_USER12 = 0x9f - DLT_USER13 = 0xa0 - DLT_USER14 = 0xa1 - DLT_USER15 = 0xa2 - DLT_USER2 = 0x95 - DLT_USER3 = 0x96 - DLT_USER4 = 0x97 - DLT_USER5 = 0x98 - DLT_USER6 = 0x99 - DLT_USER7 = 0x9a - DLT_USER8 = 0x9b - DLT_USER9 = 0x9c - DLT_WIHART = 0xdf - DLT_X2E_SERIAL = 0xd5 - DLT_X2E_XORAYA = 0xd6 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x40 - ECHOE = 0x2 - ECHOK = 0x4 - ECHOKE = 0x1 - ECHONL = 0x10 - ECHOPRT = 0x20 - EVFILT_AIO = -0x3 - EVFILT_EXCEPT = -0xf - EVFILT_FS = -0x9 - EVFILT_MACHPORT = -0x8 - EVFILT_PROC = -0x5 - EVFILT_READ = -0x1 - EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0x11 - EVFILT_THREADMARKER = 0x11 - EVFILT_TIMER = -0x7 - EVFILT_USER = -0xa - EVFILT_VM = -0xc - EVFILT_VNODE = -0x4 - EVFILT_WRITE = -0x2 - EV_ADD = 0x1 - EV_CLEAR = 0x20 - EV_DELETE = 0x2 - EV_DISABLE = 0x8 - EV_DISPATCH = 0x80 - EV_DISPATCH2 = 0x180 - EV_ENABLE = 0x4 - EV_EOF = 0x8000 - EV_ERROR = 0x4000 - EV_FLAG0 = 0x1000 - EV_FLAG1 = 0x2000 - EV_ONESHOT = 0x10 - EV_OOBAND = 0x2000 - EV_POLL = 0x1000 - EV_RECEIPT = 0x40 - EV_SYSFLAGS = 0xf000 - EV_UDATA_SPECIFIC = 0x100 - EV_VANISHED = 0x200 - EXTA = 0x4b00 - EXTB = 0x9600 - EXTPROC = 0x800 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x4000 - FFDLY = 0x4000 - FLUSHO = 0x800000 - FSOPT_ATTR_CMN_EXTENDED = 0x20 - FSOPT_NOFOLLOW = 0x1 - FSOPT_NOINMEMUPDATE = 0x2 - FSOPT_PACK_INVAL_ATTRS = 0x8 - FSOPT_REPORT_FULLSIZE = 0x4 - FSOPT_RETURN_REALDEV = 0x200 - F_ADDFILESIGS = 0x3d - F_ADDFILESIGS_FOR_DYLD_SIM = 0x53 - F_ADDFILESIGS_INFO = 0x67 - F_ADDFILESIGS_RETURN = 0x61 - 
F_ADDFILESUPPL = 0x68 - F_ADDSIGS = 0x3b - F_ALLOCATEALL = 0x4 - F_ALLOCATECONTIG = 0x2 - F_BARRIERFSYNC = 0x55 - F_CHECK_LV = 0x62 - F_CHKCLEAN = 0x29 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x43 - F_FINDSIGS = 0x4e - F_FLUSH_DATA = 0x28 - F_FREEZE_FS = 0x35 - F_FULLFSYNC = 0x33 - F_GETCODEDIR = 0x48 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLK = 0x7 - F_GETLKPID = 0x42 - F_GETNOSIGPIPE = 0x4a - F_GETOWN = 0x5 - F_GETPATH = 0x32 - F_GETPATH_MTMINFO = 0x47 - F_GETPATH_NOFIRMLINK = 0x66 - F_GETPROTECTIONCLASS = 0x3f - F_GETPROTECTIONLEVEL = 0x4d - F_GETSIGSINFO = 0x69 - F_GLOBAL_NOCACHE = 0x37 - F_LOG2PHYS = 0x31 - F_LOG2PHYS_EXT = 0x41 - F_NOCACHE = 0x30 - F_NODIRECT = 0x3e - F_OK = 0x0 - F_PATHPKG_CHECK = 0x34 - F_PEOFPOSMODE = 0x3 - F_PREALLOCATE = 0x2a - F_PUNCHHOLE = 0x63 - F_RDADVISE = 0x2c - F_RDAHEAD = 0x2d - F_RDLCK = 0x1 - F_SETBACKINGSTORE = 0x46 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLK = 0x8 - F_SETLKW = 0x9 - F_SETLKWTIMEOUT = 0xa - F_SETNOSIGPIPE = 0x49 - F_SETOWN = 0x6 - F_SETPROTECTIONCLASS = 0x40 - F_SETSIZE = 0x2b - F_SINGLE_WRITER = 0x4c - F_SPECULATIVE_READ = 0x65 - F_THAW_FS = 0x36 - F_TRANSCODEKEY = 0x4b - F_TRIM_ACTIVE_FILE = 0x64 - F_UNLCK = 0x2 - F_VOLPOSMODE = 0x4 - F_WRLCK = 0x3 - HUPCL = 0x4000 - HW_MACHINE = 0x1 - ICANON = 0x100 - ICMP6_FILTER = 0x12 - ICRNL = 0x100 - IEXTEN = 0x400 - IFF_ALLMULTI = 0x200 - IFF_ALTPHYS = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_LINK0 = 0x1000 - IFF_LINK1 = 0x2000 - IFF_LINK2 = 0x4000 - IFF_LOOPBACK = 0x8 - IFF_MULTICAST = 0x8000 - IFF_NOARP = 0x80 - IFF_NOTRAILERS = 0x20 - IFF_OACTIVE = 0x400 - IFF_POINTOPOINT = 0x10 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SIMPLEX = 0x800 - IFF_UP = 0x1 - IFNAMSIZ = 0x10 - IFT_1822 = 0x2 - IFT_6LOWPAN = 0x40 - IFT_AAL5 = 0x31 - IFT_ARCNET = 0x23 - IFT_ARCNETPLUS = 0x24 - IFT_ATM = 0x25 - IFT_BRIDGE = 0xd1 - IFT_CARP = 0xf8 - IFT_CELLULAR = 0xff - IFT_CEPT = 0x13 - IFT_DS3 = 0x1e - IFT_ENC = 0xf4 - IFT_EON = 0x19 - IFT_ETHER = 0x6 - IFT_FAITH = 0x38 - IFT_FDDI = 0xf - IFT_FRELAY = 0x20 - IFT_FRELAYDCE = 0x2c - IFT_GIF = 0x37 - IFT_HDH1822 = 0x3 - IFT_HIPPI = 0x2f - IFT_HSSI = 0x2e - IFT_HY = 0xe - IFT_IEEE1394 = 0x90 - IFT_IEEE8023ADLAG = 0x88 - IFT_ISDNBASIC = 0x14 - IFT_ISDNPRIMARY = 0x15 - IFT_ISO88022LLC = 0x29 - IFT_ISO88023 = 0x7 - IFT_ISO88024 = 0x8 - IFT_ISO88025 = 0x9 - IFT_ISO88026 = 0xa - IFT_L2VLAN = 0x87 - IFT_LAPB = 0x10 - IFT_LOCALTALK = 0x2a - IFT_LOOP = 0x18 - IFT_MIOX25 = 0x26 - IFT_MODEM = 0x30 - IFT_NSIP = 0x1b - IFT_OTHER = 0x1 - IFT_P10 = 0xc - IFT_P80 = 0xd - IFT_PARA = 0x22 - IFT_PDP = 0xff - IFT_PFLOG = 0xf5 - IFT_PFSYNC = 0xf6 - IFT_PKTAP = 0xfe - IFT_PPP = 0x17 - IFT_PROPMUX = 0x36 - IFT_PROPVIRTUAL = 0x35 - IFT_PTPSERIAL = 0x16 - IFT_RS232 = 0x21 - IFT_SDLC = 0x11 - IFT_SIP = 0x1f - IFT_SLIP = 0x1c - IFT_SMDSDXI = 0x2b - IFT_SMDSICIP = 0x34 - IFT_SONET = 0x27 - IFT_SONETPATH = 0x32 - IFT_SONETVT = 0x33 - IFT_STARLAN = 0xb - IFT_STF = 0x39 - IFT_T1 = 0x12 - IFT_ULTRA = 0x1d - IFT_V35 = 0x2d - IFT_X25 = 0x5 - IFT_X25DDN = 0x4 - IFT_X25PLE = 0x28 - IFT_XETHER = 0x1a - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLASSD_HOST = 0xfffffff - IN_CLASSD_NET = 0xf0000000 - IN_CLASSD_NSHIFT = 0x1c - IN_LINKLOCALNETNUM = 
0xa9fe0000 - IN_LOOPBACKNET = 0x7f - IPPROTO_3PC = 0x22 - IPPROTO_ADFS = 0x44 - IPPROTO_AH = 0x33 - IPPROTO_AHIP = 0x3d - IPPROTO_APES = 0x63 - IPPROTO_ARGUS = 0xd - IPPROTO_AX25 = 0x5d - IPPROTO_BHA = 0x31 - IPPROTO_BLT = 0x1e - IPPROTO_BRSATMON = 0x4c - IPPROTO_CFTP = 0x3e - IPPROTO_CHAOS = 0x10 - IPPROTO_CMTP = 0x26 - IPPROTO_CPHB = 0x49 - IPPROTO_CPNX = 0x48 - IPPROTO_DDP = 0x25 - IPPROTO_DGP = 0x56 - IPPROTO_DIVERT = 0xfe - IPPROTO_DONE = 0x101 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_EMCON = 0xe - IPPROTO_ENCAP = 0x62 - IPPROTO_EON = 0x50 - IPPROTO_ESP = 0x32 - IPPROTO_ETHERIP = 0x61 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GGP = 0x3 - IPPROTO_GMTP = 0x64 - IPPROTO_GRE = 0x2f - IPPROTO_HELLO = 0x3f - IPPROTO_HMP = 0x14 - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IDPR = 0x23 - IPPROTO_IDRP = 0x2d - IPPROTO_IGMP = 0x2 - IPPROTO_IGP = 0x55 - IPPROTO_IGRP = 0x58 - IPPROTO_IL = 0x28 - IPPROTO_INLSP = 0x34 - IPPROTO_INP = 0x20 - IPPROTO_IP = 0x0 - IPPROTO_IPCOMP = 0x6c - IPPROTO_IPCV = 0x47 - IPPROTO_IPEIP = 0x5e - IPPROTO_IPIP = 0x4 - IPPROTO_IPPC = 0x43 - IPPROTO_IPV4 = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_IRTP = 0x1c - IPPROTO_KRYPTOLAN = 0x41 - IPPROTO_LARP = 0x5b - IPPROTO_LEAF1 = 0x19 - IPPROTO_LEAF2 = 0x1a - IPPROTO_MAX = 0x100 - IPPROTO_MAXID = 0x34 - IPPROTO_MEAS = 0x13 - IPPROTO_MHRP = 0x30 - IPPROTO_MICP = 0x5f - IPPROTO_MTP = 0x5c - IPPROTO_MUX = 0x12 - IPPROTO_ND = 0x4d - IPPROTO_NHRP = 0x36 - IPPROTO_NONE = 0x3b - IPPROTO_NSP = 0x1f - IPPROTO_NVPII = 0xb - IPPROTO_OSPFIGP = 0x59 - IPPROTO_PGM = 0x71 - IPPROTO_PIGP = 0x9 - IPPROTO_PIM = 0x67 - IPPROTO_PRM = 0x15 - IPPROTO_PUP = 0xc - IPPROTO_PVP = 0x4b - IPPROTO_RAW = 0xff - IPPROTO_RCCMON = 0xa - IPPROTO_RDP = 0x1b - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_RVD = 0x42 - IPPROTO_SATEXPAK = 0x40 - IPPROTO_SATMON = 0x45 - IPPROTO_SCCSP = 0x60 - IPPROTO_SCTP = 0x84 - IPPROTO_SDRP = 0x2a - IPPROTO_SEP = 0x21 - IPPROTO_SRPC = 0x5a - IPPROTO_ST = 0x7 - IPPROTO_SVMTP = 0x52 - IPPROTO_SWIPE = 0x35 - IPPROTO_TCF = 0x57 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_TPXX = 0x27 - IPPROTO_TRUNK1 = 0x17 - IPPROTO_TRUNK2 = 0x18 - IPPROTO_TTP = 0x54 - IPPROTO_UDP = 0x11 - IPPROTO_VINES = 0x53 - IPPROTO_VISA = 0x46 - IPPROTO_VMTP = 0x51 - IPPROTO_WBEXPAK = 0x4f - IPPROTO_WBMON = 0x4e - IPPROTO_WSN = 0x4a - IPPROTO_XNET = 0xf - IPPROTO_XTP = 0x24 - IPV6_2292DSTOPTS = 0x17 - IPV6_2292HOPLIMIT = 0x14 - IPV6_2292HOPOPTS = 0x16 - IPV6_2292NEXTHOP = 0x15 - IPV6_2292PKTINFO = 0x13 - IPV6_2292PKTOPTIONS = 0x19 - IPV6_2292RTHDR = 0x18 - IPV6_3542DSTOPTS = 0x32 - IPV6_3542HOPLIMIT = 0x2f - IPV6_3542HOPOPTS = 0x31 - IPV6_3542NEXTHOP = 0x30 - IPV6_3542PKTINFO = 0x2e - IPV6_3542RTHDR = 0x33 - IPV6_ADDR_MC_FLAGS_PREFIX = 0x20 - IPV6_ADDR_MC_FLAGS_TRANSIENT = 0x10 - IPV6_ADDR_MC_FLAGS_UNICAST_BASED = 0x30 - IPV6_AUTOFLOWLABEL = 0x3b - IPV6_BINDV6ONLY = 0x1b - IPV6_BOUND_IF = 0x7d - IPV6_CHECKSUM = 0x1a - IPV6_DEFAULT_MULTICAST_HOPS = 0x1 - IPV6_DEFAULT_MULTICAST_LOOP = 0x1 - IPV6_DEFHLIM = 0x40 - IPV6_DONTFRAG = 0x3e - IPV6_DSTOPTS = 0x32 - IPV6_FAITH = 0x1d - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 - IPV6_FLOW_ECN_MASK = 0x3000 - IPV6_FRAGTTL = 0x3c - IPV6_FW_ADD = 0x1e - IPV6_FW_DEL = 0x1f - IPV6_FW_FLUSH = 0x20 - IPV6_FW_GET = 0x22 - IPV6_FW_ZERO = 0x21 - IPV6_HLIMDEC = 0x1 - IPV6_HOPLIMIT = 0x2f - IPV6_HOPOPTS = 0x31 - IPV6_IPSEC_POLICY = 0x1c - IPV6_JOIN_GROUP = 0xc - IPV6_LEAVE_GROUP = 0xd - IPV6_MAXHLIM = 0xff - IPV6_MAXOPTHDR = 0x800 - 
IPV6_MAXPACKET = 0xffff - IPV6_MAX_GROUP_SRC_FILTER = 0x200 - IPV6_MAX_MEMBERSHIPS = 0xfff - IPV6_MAX_SOCK_SRC_FILTER = 0x80 - IPV6_MIN_MEMBERSHIPS = 0x1f - IPV6_MMTU = 0x500 - IPV6_MSFILTER = 0x4a - IPV6_MULTICAST_HOPS = 0xa - IPV6_MULTICAST_IF = 0x9 - IPV6_MULTICAST_LOOP = 0xb - IPV6_NEXTHOP = 0x30 - IPV6_PATHMTU = 0x2c - IPV6_PKTINFO = 0x2e - IPV6_PORTRANGE = 0xe - IPV6_PORTRANGE_DEFAULT = 0x0 - IPV6_PORTRANGE_HIGH = 0x1 - IPV6_PORTRANGE_LOW = 0x2 - IPV6_PREFER_TEMPADDR = 0x3f - IPV6_RECVDSTOPTS = 0x28 - IPV6_RECVHOPLIMIT = 0x25 - IPV6_RECVHOPOPTS = 0x27 - IPV6_RECVPATHMTU = 0x2b - IPV6_RECVPKTINFO = 0x3d - IPV6_RECVRTHDR = 0x26 - IPV6_RECVTCLASS = 0x23 - IPV6_RTHDR = 0x33 - IPV6_RTHDRDSTOPTS = 0x39 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_SOCKOPT_RESERVED1 = 0x3 - IPV6_TCLASS = 0x24 - IPV6_UNICAST_HOPS = 0x4 - IPV6_USE_MIN_MTU = 0x2a - IPV6_V6ONLY = 0x1b - IPV6_VERSION = 0x60 - IPV6_VERSION_MASK = 0xf0 - IP_ADD_MEMBERSHIP = 0xc - IP_ADD_SOURCE_MEMBERSHIP = 0x46 - IP_BLOCK_SOURCE = 0x48 - IP_BOUND_IF = 0x19 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DONTFRAG = 0x1c - IP_DROP_MEMBERSHIP = 0xd - IP_DROP_SOURCE_MEMBERSHIP = 0x47 - IP_DUMMYNET_CONFIGURE = 0x3c - IP_DUMMYNET_DEL = 0x3d - IP_DUMMYNET_FLUSH = 0x3e - IP_DUMMYNET_GET = 0x40 - IP_FAITH = 0x16 - IP_FW_ADD = 0x28 - IP_FW_DEL = 0x29 - IP_FW_FLUSH = 0x2a - IP_FW_GET = 0x2c - IP_FW_RESETLOG = 0x2d - IP_FW_ZERO = 0x2b - IP_HDRINCL = 0x2 - IP_IPSEC_POLICY = 0x15 - IP_MAXPACKET = 0xffff - IP_MAX_GROUP_SRC_FILTER = 0x200 - IP_MAX_MEMBERSHIPS = 0xfff - IP_MAX_SOCK_MUTE_FILTER = 0x80 - IP_MAX_SOCK_SRC_FILTER = 0x80 - IP_MF = 0x2000 - IP_MIN_MEMBERSHIPS = 0x1f - IP_MSFILTER = 0x4a - IP_MSS = 0x240 - IP_MULTICAST_IF = 0x9 - IP_MULTICAST_IFINDEX = 0x42 - IP_MULTICAST_LOOP = 0xb - IP_MULTICAST_TTL = 0xa - IP_MULTICAST_VIF = 0xe - IP_NAT__XXX = 0x37 - IP_OFFMASK = 0x1fff - IP_OLD_FW_ADD = 0x32 - IP_OLD_FW_DEL = 0x33 - IP_OLD_FW_FLUSH = 0x34 - IP_OLD_FW_GET = 0x36 - IP_OLD_FW_RESETLOG = 0x38 - IP_OLD_FW_ZERO = 0x35 - IP_OPTIONS = 0x1 - IP_PKTINFO = 0x1a - IP_PORTRANGE = 0x13 - IP_PORTRANGE_DEFAULT = 0x0 - IP_PORTRANGE_HIGH = 0x1 - IP_PORTRANGE_LOW = 0x2 - IP_RECVDSTADDR = 0x7 - IP_RECVIF = 0x14 - IP_RECVOPTS = 0x5 - IP_RECVPKTINFO = 0x1a - IP_RECVRETOPTS = 0x6 - IP_RECVTOS = 0x1b - IP_RECVTTL = 0x18 - IP_RETOPTS = 0x8 - IP_RF = 0x8000 - IP_RSVP_OFF = 0x10 - IP_RSVP_ON = 0xf - IP_RSVP_VIF_OFF = 0x12 - IP_RSVP_VIF_ON = 0x11 - IP_STRIPHDR = 0x17 - IP_TOS = 0x3 - IP_TRAFFIC_MGT_BACKGROUND = 0x41 - IP_TTL = 0x4 - IP_UNBLOCK_SOURCE = 0x49 - ISIG = 0x80 - ISTRIP = 0x20 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x400 - IXON = 0x200 - KERN_HOSTNAME = 0xa - KERN_OSRELEASE = 0x2 - KERN_OSTYPE = 0x1 - KERN_VERSION = 0x4 - LOCAL_PEERCRED = 0x1 - LOCAL_PEEREPID = 0x3 - LOCAL_PEEREUUID = 0x5 - LOCAL_PEERPID = 0x2 - LOCAL_PEERTOKEN = 0x6 - LOCAL_PEERUUID = 0x4 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - MADV_CAN_REUSE = 0x9 - MADV_DONTNEED = 0x4 - MADV_FREE = 0x5 - MADV_FREE_REUSABLE = 0x7 - MADV_FREE_REUSE = 0x8 - MADV_NORMAL = 0x0 - MADV_PAGEOUT = 0xa - MADV_RANDOM = 0x1 - MADV_SEQUENTIAL = 0x2 - MADV_WILLNEED = 0x3 - MADV_ZERO_WIRED_PAGES = 0x6 - MAP_32BIT = 0x8000 - MAP_ANON = 0x1000 - MAP_ANONYMOUS = 0x1000 - MAP_COPY = 0x2 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_HASSEMAPHORE = 0x200 - MAP_JIT = 0x800 - MAP_NOCACHE = 0x400 - MAP_NOEXTEND = 0x100 - MAP_NORESERVE = 0x40 - MAP_PRIVATE = 0x2 - MAP_RENAME = 0x20 - MAP_RESERVED0080 = 0x80 - 
MAP_RESILIENT_CODESIGN = 0x2000 - MAP_RESILIENT_MEDIA = 0x4000 - MAP_SHARED = 0x1 - MAP_TRANSLATED_ALLOW_EXECUTE = 0x20000 - MAP_UNIX03 = 0x40000 - MCAST_BLOCK_SOURCE = 0x54 - MCAST_EXCLUDE = 0x2 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x50 - MCAST_JOIN_SOURCE_GROUP = 0x52 - MCAST_LEAVE_GROUP = 0x51 - MCAST_LEAVE_SOURCE_GROUP = 0x53 - MCAST_UNBLOCK_SOURCE = 0x55 - MCAST_UNDEFINED = 0x0 - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MNT_ASYNC = 0x40 - MNT_AUTOMOUNTED = 0x400000 - MNT_CMDFLAGS = 0xf0000 - MNT_CPROTECT = 0x80 - MNT_DEFWRITE = 0x2000000 - MNT_DONTBROWSE = 0x100000 - MNT_DOVOLFS = 0x8000 - MNT_DWAIT = 0x4 - MNT_EXPORTED = 0x100 - MNT_EXT_ROOT_DATA_VOL = 0x1 - MNT_FORCE = 0x80000 - MNT_IGNORE_OWNERSHIP = 0x200000 - MNT_JOURNALED = 0x800000 - MNT_LOCAL = 0x1000 - MNT_MULTILABEL = 0x4000000 - MNT_NOATIME = 0x10000000 - MNT_NOBLOCK = 0x20000 - MNT_NODEV = 0x10 - MNT_NOEXEC = 0x4 - MNT_NOSUID = 0x8 - MNT_NOUSERXATTR = 0x1000000 - MNT_NOWAIT = 0x2 - MNT_QUARANTINE = 0x400 - MNT_QUOTA = 0x2000 - MNT_RDONLY = 0x1 - MNT_RELOAD = 0x40000 - MNT_REMOVABLE = 0x200 - MNT_ROOTFS = 0x4000 - MNT_SNAPSHOT = 0x40000000 - MNT_STRICTATIME = 0x80000000 - MNT_SYNCHRONOUS = 0x2 - MNT_UNION = 0x20 - MNT_UNKNOWNPERMISSIONS = 0x200000 - MNT_UPDATE = 0x10000 - MNT_VISFLAGMASK = 0xd7f0f7ff - MNT_WAIT = 0x1 - MSG_CTRUNC = 0x20 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x80 - MSG_EOF = 0x100 - MSG_EOR = 0x8 - MSG_FLUSH = 0x400 - MSG_HAVEMORE = 0x2000 - MSG_HOLD = 0x800 - MSG_NEEDSA = 0x10000 - MSG_NOSIGNAL = 0x80000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_RCVMORE = 0x4000 - MSG_SEND = 0x1000 - MSG_TRUNC = 0x10 - MSG_WAITALL = 0x40 - MSG_WAITSTREAM = 0x200 - MS_ASYNC = 0x1 - MS_DEACTIVATE = 0x8 - MS_INVALIDATE = 0x2 - MS_KILLPAGES = 0x4 - MS_SYNC = 0x10 - NAME_MAX = 0xff - NET_RT_DUMP = 0x1 - NET_RT_DUMP2 = 0x7 - NET_RT_FLAGS = 0x2 - NET_RT_FLAGS_PRIV = 0xa - NET_RT_IFLIST = 0x3 - NET_RT_IFLIST2 = 0x6 - NET_RT_MAXID = 0xb - NET_RT_STAT = 0x4 - NET_RT_TRASH = 0x5 - NFDBITS = 0x20 - NL0 = 0x0 - NL1 = 0x100 - NL2 = 0x200 - NL3 = 0x300 - NLDLY = 0x300 - NOFLSH = 0x80000000 - NOKERNINFO = 0x2000000 - NOTE_ABSOLUTE = 0x8 - NOTE_ATTRIB = 0x8 - NOTE_BACKGROUND = 0x40 - NOTE_CHILD = 0x4 - NOTE_CRITICAL = 0x20 - NOTE_DELETE = 0x1 - NOTE_EXEC = 0x20000000 - NOTE_EXIT = 0x80000000 - NOTE_EXITSTATUS = 0x4000000 - NOTE_EXIT_CSERROR = 0x40000 - NOTE_EXIT_DECRYPTFAIL = 0x10000 - NOTE_EXIT_DETAIL = 0x2000000 - NOTE_EXIT_DETAIL_MASK = 0x70000 - NOTE_EXIT_MEMORY = 0x20000 - NOTE_EXIT_REPARENTED = 0x80000 - NOTE_EXTEND = 0x4 - NOTE_FFAND = 0x40000000 - NOTE_FFCOPY = 0xc0000000 - NOTE_FFCTRLMASK = 0xc0000000 - NOTE_FFLAGSMASK = 0xffffff - NOTE_FFNOP = 0x0 - NOTE_FFOR = 0x80000000 - NOTE_FORK = 0x40000000 - NOTE_FUNLOCK = 0x100 - NOTE_LEEWAY = 0x10 - NOTE_LINK = 0x10 - NOTE_LOWAT = 0x1 - NOTE_MACHTIME = 0x100 - NOTE_MACH_CONTINUOUS_TIME = 0x80 - NOTE_NONE = 0x80 - NOTE_NSECONDS = 0x4 - NOTE_OOB = 0x2 - NOTE_PCTRLMASK = -0x100000 - NOTE_PDATAMASK = 0xfffff - NOTE_REAP = 0x10000000 - NOTE_RENAME = 0x20 - NOTE_REVOKE = 0x40 - NOTE_SECONDS = 0x1 - NOTE_SIGNAL = 0x8000000 - NOTE_TRACK = 0x1 - NOTE_TRACKERR = 0x2 - NOTE_TRIGGER = 0x1000000 - NOTE_USECONDS = 0x2 - NOTE_VM_ERROR = 0x10000000 - NOTE_VM_PRESSURE = 0x80000000 - NOTE_VM_PRESSURE_SUDDEN_TERMINATE = 0x20000000 - NOTE_VM_PRESSURE_TERMINATE = 0x40000000 - NOTE_WRITE = 0x2 - OCRNL = 0x10 - OFDEL = 0x20000 - OFILL = 0x80 - ONLCR = 0x2 - ONLRET = 0x40 - ONOCR = 0x20 - ONOEOT = 0x8 - OPOST = 0x1 - OXTABS = 0x4 - O_ACCMODE = 0x3 - O_ALERT = 0x20000000 - O_APPEND = 0x8 - O_ASYNC = 0x40 - 
O_CLOEXEC = 0x1000000 - O_CREAT = 0x200 - O_DIRECTORY = 0x100000 - O_DP_GETRAWENCRYPTED = 0x1 - O_DP_GETRAWUNENCRYPTED = 0x2 - O_DSYNC = 0x400000 - O_EVTONLY = 0x8000 - O_EXCL = 0x800 - O_EXLOCK = 0x20 - O_FSYNC = 0x80 - O_NDELAY = 0x4 - O_NOCTTY = 0x20000 - O_NOFOLLOW = 0x100 - O_NOFOLLOW_ANY = 0x20000000 - O_NONBLOCK = 0x4 - O_POPUP = 0x80000000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_SHLOCK = 0x10 - O_SYMLINK = 0x200000 - O_SYNC = 0x80 - O_TRUNC = 0x400 - O_WRONLY = 0x1 - PARENB = 0x1000 - PARMRK = 0x8 - PARODD = 0x2000 - PENDIN = 0x20000000 - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROT_EXEC = 0x4 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PT_ATTACH = 0xa - PT_ATTACHEXC = 0xe - PT_CONTINUE = 0x7 - PT_DENY_ATTACH = 0x1f - PT_DETACH = 0xb - PT_FIRSTMACH = 0x20 - PT_FORCEQUOTA = 0x1e - PT_KILL = 0x8 - PT_READ_D = 0x2 - PT_READ_I = 0x1 - PT_READ_U = 0x3 - PT_SIGEXC = 0xc - PT_STEP = 0x9 - PT_THUPDATE = 0xd - PT_TRACE_ME = 0x0 - PT_WRITE_D = 0x5 - PT_WRITE_I = 0x4 - PT_WRITE_U = 0x6 - RLIMIT_AS = 0x5 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_CPU_USAGE_MONITOR = 0x2 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_MEMLOCK = 0x6 - RLIMIT_NOFILE = 0x8 - RLIMIT_NPROC = 0x7 - RLIMIT_RSS = 0x5 - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0x7fffffffffffffff - RTAX_AUTHOR = 0x6 - RTAX_BRD = 0x7 - RTAX_DST = 0x0 - RTAX_GATEWAY = 0x1 - RTAX_GENMASK = 0x3 - RTAX_IFA = 0x5 - RTAX_IFP = 0x4 - RTAX_MAX = 0x8 - RTAX_NETMASK = 0x2 - RTA_AUTHOR = 0x40 - RTA_BRD = 0x80 - RTA_DST = 0x1 - RTA_GATEWAY = 0x2 - RTA_GENMASK = 0x8 - RTA_IFA = 0x20 - RTA_IFP = 0x10 - RTA_NETMASK = 0x4 - RTF_BLACKHOLE = 0x1000 - RTF_BROADCAST = 0x400000 - RTF_CLONING = 0x100 - RTF_CONDEMNED = 0x2000000 - RTF_DEAD = 0x20000000 - RTF_DELCLONE = 0x80 - RTF_DONE = 0x40 - RTF_DYNAMIC = 0x10 - RTF_GATEWAY = 0x2 - RTF_GLOBAL = 0x40000000 - RTF_HOST = 0x4 - RTF_IFREF = 0x4000000 - RTF_IFSCOPE = 0x1000000 - RTF_LLDATA = 0x400 - RTF_LLINFO = 0x400 - RTF_LOCAL = 0x200000 - RTF_MODIFIED = 0x20 - RTF_MULTICAST = 0x800000 - RTF_NOIFREF = 0x2000 - RTF_PINNED = 0x100000 - RTF_PRCLONING = 0x10000 - RTF_PROTO1 = 0x8000 - RTF_PROTO2 = 0x4000 - RTF_PROTO3 = 0x40000 - RTF_PROXY = 0x8000000 - RTF_REJECT = 0x8 - RTF_ROUTER = 0x10000000 - RTF_STATIC = 0x800 - RTF_UP = 0x1 - RTF_WASCLONED = 0x20000 - RTF_XRESOLVE = 0x200 - RTM_ADD = 0x1 - RTM_CHANGE = 0x3 - RTM_DELADDR = 0xd - RTM_DELETE = 0x2 - RTM_DELMADDR = 0x10 - RTM_GET = 0x4 - RTM_GET2 = 0x14 - RTM_IFINFO = 0xe - RTM_IFINFO2 = 0x12 - RTM_LOCK = 0x8 - RTM_LOSING = 0x5 - RTM_MISS = 0x7 - RTM_NEWADDR = 0xc - RTM_NEWMADDR = 0xf - RTM_NEWMADDR2 = 0x13 - RTM_OLDADD = 0x9 - RTM_OLDDEL = 0xa - RTM_REDIRECT = 0x6 - RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 - RTM_VERSION = 0x5 - RTV_EXPIRE = 0x4 - RTV_HOPCOUNT = 0x2 - RTV_MTU = 0x1 - RTV_RPIPE = 0x8 - RTV_RTT = 0x40 - RTV_RTTVAR = 0x80 - RTV_SPIPE = 0x10 - RTV_SSTHRESH = 0x20 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - SCM_CREDS = 0x3 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x2 - SCM_TIMESTAMP_MONOTONIC = 0x4 - SEEK_CUR = 0x1 - SEEK_DATA = 0x4 - SEEK_END = 0x2 - SEEK_HOLE = 0x3 - SEEK_SET = 0x0 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDMULTI = 0x80206931 - SIOCAIFADDR = 0x8040691a - SIOCARPIPLL = 0xc0206928 - SIOCATMARK = 0x40047307 - SIOCAUTOADDR = 0xc0206926 - SIOCAUTONETMASK = 0x80206927 - SIOCDELMULTI = 0x80206932 - SIOCDIFADDR = 0x80206919 - SIOCDIFPHYADDR = 0x80206941 - SIOCGDRVSPEC = 0xc028697b - SIOCGETVLAN = 0xc020697f - SIOCGHIWAT = 0x40047301 - SIOCGIF6LOWPAN = 0xc02069c5 - SIOCGIFADDR = 0xc0206921 - 
SIOCGIFALTMTU = 0xc0206948 - SIOCGIFASYNCMAP = 0xc020697c - SIOCGIFBOND = 0xc0206947 - SIOCGIFBRDADDR = 0xc0206923 - SIOCGIFCAP = 0xc020695b - SIOCGIFCONF = 0xc00c6924 - SIOCGIFDEVMTU = 0xc0206944 - SIOCGIFDSTADDR = 0xc0206922 - SIOCGIFFLAGS = 0xc0206911 - SIOCGIFFUNCTIONALTYPE = 0xc02069ad - SIOCGIFGENERIC = 0xc020693a - SIOCGIFKPI = 0xc0206987 - SIOCGIFMAC = 0xc0206982 - SIOCGIFMEDIA = 0xc02c6938 - SIOCGIFMETRIC = 0xc0206917 - SIOCGIFMTU = 0xc0206933 - SIOCGIFNETMASK = 0xc0206925 - SIOCGIFPDSTADDR = 0xc0206940 - SIOCGIFPHYS = 0xc0206935 - SIOCGIFPSRCADDR = 0xc020693f - SIOCGIFSTATUS = 0xc331693d - SIOCGIFVLAN = 0xc020697f - SIOCGIFWAKEFLAGS = 0xc0206988 - SIOCGIFXMEDIA = 0xc02c6948 - SIOCGLOWAT = 0x40047303 - SIOCGPGRP = 0x40047309 - SIOCIFCREATE = 0xc0206978 - SIOCIFCREATE2 = 0xc020697a - SIOCIFDESTROY = 0x80206979 - SIOCIFGCLONERS = 0xc0106981 - SIOCRSLVMULTI = 0xc010693b - SIOCSDRVSPEC = 0x8028697b - SIOCSETVLAN = 0x8020697e - SIOCSHIWAT = 0x80047300 - SIOCSIF6LOWPAN = 0x802069c4 - SIOCSIFADDR = 0x8020690c - SIOCSIFALTMTU = 0x80206945 - SIOCSIFASYNCMAP = 0x8020697d - SIOCSIFBOND = 0x80206946 - SIOCSIFBRDADDR = 0x80206913 - SIOCSIFCAP = 0x8020695a - SIOCSIFDSTADDR = 0x8020690e - SIOCSIFFLAGS = 0x80206910 - SIOCSIFGENERIC = 0x80206939 - SIOCSIFKPI = 0x80206986 - SIOCSIFLLADDR = 0x8020693c - SIOCSIFMAC = 0x80206983 - SIOCSIFMEDIA = 0xc0206937 - SIOCSIFMETRIC = 0x80206918 - SIOCSIFMTU = 0x80206934 - SIOCSIFNETMASK = 0x80206916 - SIOCSIFPHYADDR = 0x8040693e - SIOCSIFPHYS = 0x80206936 - SIOCSIFVLAN = 0x8020697e - SIOCSLOWAT = 0x80047302 - SIOCSPGRP = 0x80047308 - SOCK_DGRAM = 0x2 - SOCK_MAXADDRLEN = 0xff - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x1 - SOL_LOCAL = 0x0 - SOL_SOCKET = 0xffff - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x2 - SO_BROADCAST = 0x20 - SO_DEBUG = 0x1 - SO_DONTROUTE = 0x10 - SO_DONTTRUNC = 0x2000 - SO_ERROR = 0x1007 - SO_KEEPALIVE = 0x8 - SO_LABEL = 0x1010 - SO_LINGER = 0x80 - SO_LINGER_SEC = 0x1080 - SO_NETSVC_MARKING_LEVEL = 0x1119 - SO_NET_SERVICE_TYPE = 0x1116 - SO_NKE = 0x1021 - SO_NOADDRERR = 0x1023 - SO_NOSIGPIPE = 0x1022 - SO_NOTIFYCONFLICT = 0x1026 - SO_NP_EXTENSIONS = 0x1083 - SO_NREAD = 0x1020 - SO_NUMRCVPKT = 0x1112 - SO_NWRITE = 0x1024 - SO_OOBINLINE = 0x100 - SO_PEERLABEL = 0x1011 - SO_RANDOMPORT = 0x1082 - SO_RCVBUF = 0x1002 - SO_RCVLOWAT = 0x1004 - SO_RCVTIMEO = 0x1006 - SO_REUSEADDR = 0x4 - SO_REUSEPORT = 0x200 - SO_REUSESHAREUID = 0x1025 - SO_SNDBUF = 0x1001 - SO_SNDLOWAT = 0x1003 - SO_SNDTIMEO = 0x1005 - SO_TIMESTAMP = 0x400 - SO_TIMESTAMP_MONOTONIC = 0x800 - SO_TYPE = 0x1008 - SO_UPCALLCLOSEWAIT = 0x1027 - SO_USELOOPBACK = 0x40 - SO_WANTMORE = 0x4000 - SO_WANTOOBFLAG = 0x8000 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IFWHT = 0xe000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISTXT = 0x200 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x400 - TAB2 = 0x800 - TAB3 = 0x4 - TABDLY = 0xc04 - TCIFLUSH = 0x1 - TCIOFF = 0x3 - TCIOFLUSH = 0x3 - TCION = 0x4 - TCOFLUSH = 0x2 - TCOOFF = 0x1 - TCOON = 0x2 - TCP_CONNECTIONTIMEOUT = 0x20 - TCP_CONNECTION_INFO = 0x106 - TCP_ENABLE_ECN = 0x104 - TCP_FASTOPEN = 0x105 - TCP_KEEPALIVE = 0x10 - TCP_KEEPCNT = 0x102 - TCP_KEEPINTVL = 0x101 - 
TCP_MAXHLEN = 0x3c - TCP_MAXOLEN = 0x28 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_SACK = 0x4 - TCP_MAX_WINSHIFT = 0xe - TCP_MINMSS = 0xd8 - TCP_MSS = 0x200 - TCP_NODELAY = 0x1 - TCP_NOOPT = 0x8 - TCP_NOPUSH = 0x4 - TCP_NOTSENT_LOWAT = 0x201 - TCP_RXT_CONNDROPTIME = 0x80 - TCP_RXT_FINDROP = 0x100 - TCP_SENDMOREACKS = 0x103 - TCSAFLUSH = 0x2 - TIOCCBRK = 0x2000747a - TIOCCDTR = 0x20007478 - TIOCCONS = 0x80047462 - TIOCDCDTIMESTAMP = 0x40107458 - TIOCDRAIN = 0x2000745e - TIOCDSIMICROCODE = 0x20007455 - TIOCEXCL = 0x2000740d - TIOCEXT = 0x80047460 - TIOCFLUSH = 0x80047410 - TIOCGDRAINWAIT = 0x40047456 - TIOCGETA = 0x40487413 - TIOCGETD = 0x4004741a - TIOCGPGRP = 0x40047477 - TIOCGWINSZ = 0x40087468 - TIOCIXOFF = 0x20007480 - TIOCIXON = 0x20007481 - TIOCMBIC = 0x8004746b - TIOCMBIS = 0x8004746c - TIOCMGDTRWAIT = 0x4004745a - TIOCMGET = 0x4004746a - TIOCMODG = 0x40047403 - TIOCMODS = 0x80047404 - TIOCMSDTRWAIT = 0x8004745b - TIOCMSET = 0x8004746d - TIOCM_CAR = 0x40 - TIOCM_CD = 0x40 - TIOCM_CTS = 0x20 - TIOCM_DSR = 0x100 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x80 - TIOCM_RNG = 0x80 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x10 - TIOCM_ST = 0x8 - TIOCNOTTY = 0x20007471 - TIOCNXCL = 0x2000740e - TIOCOUTQ = 0x40047473 - TIOCPKT = 0x80047470 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCPTYGNAME = 0x40807453 - TIOCPTYGRANT = 0x20007454 - TIOCPTYUNLK = 0x20007452 - TIOCREMOTE = 0x80047469 - TIOCSBRK = 0x2000747b - TIOCSCONS = 0x20007463 - TIOCSCTTY = 0x20007461 - TIOCSDRAINWAIT = 0x80047457 - TIOCSDTR = 0x20007479 - TIOCSETA = 0x80487414 - TIOCSETAF = 0x80487416 - TIOCSETAW = 0x80487415 - TIOCSETD = 0x8004741b - TIOCSIG = 0x2000745f - TIOCSPGRP = 0x80047476 - TIOCSTART = 0x2000746e - TIOCSTAT = 0x20007465 - TIOCSTI = 0x80017472 - TIOCSTOP = 0x2000746f - TIOCSWINSZ = 0x80087467 - TIOCTIMESTAMP = 0x40107459 - TIOCUCNTL = 0x80047466 - TOSTOP = 0x400000 - VDISCARD = 0xf - VDSUSP = 0xb - VEOF = 0x0 - VEOL = 0x1 - VEOL2 = 0x2 - VERASE = 0x3 - VINTR = 0x8 - VKILL = 0x5 - VLNEXT = 0xe - VMIN = 0x10 - VM_LOADAVG = 0x2 - VM_MACHFACTOR = 0x4 - VM_MAXID = 0x6 - VM_METER = 0x1 - VM_SWAPUSAGE = 0x5 - VQUIT = 0x9 - VREPRINT = 0x6 - VSTART = 0xc - VSTATUS = 0x12 - VSTOP = 0xd - VSUSP = 0xa - VT0 = 0x0 - VT1 = 0x10000 - VTDLY = 0x10000 - VTIME = 0x11 - VWERASE = 0x4 - WCONTINUED = 0x10 - WCOREFLAG = 0x80 - WEXITED = 0x4 - WNOHANG = 0x1 - WNOWAIT = 0x20 - WORDSIZE = 0x40 - WSTOPPED = 0x8 - WUNTRACED = 0x2 - XATTR_CREATE = 0x2 - XATTR_NODEFAULT = 0x10 - XATTR_NOFOLLOW = 0x1 - XATTR_NOSECURITY = 0x8 - XATTR_REPLACE = 0x4 - XATTR_SHOWCOMPRESSION = 0x20 + AF_APPLETALK = 0x10 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1c + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x25 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1e + AF_IPX = 0x17 + AF_ISDN = 0x1c + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x29 + AF_NATM = 0x1f + AF_NDRV = 0x1b + AF_NETBIOS = 0x21 + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PPP = 0x22 + AF_PUP = 0x4 + AF_RESERVED_36 = 0x24 + AF_ROUTE = 0x11 + AF_SIP = 0x18 + AF_SNA = 0xb + AF_SYSTEM = 0x20 + AF_SYS_CONTROL = 0x2 + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_UTUN = 0x26 + AF_VSOCK = 0x28 + ALTWERASE = 0x200 + ATTR_BIT_MAP_COUNT = 0x5 + ATTR_CMN_ACCESSMASK = 0x20000 + ATTR_CMN_ACCTIME = 0x1000 + ATTR_CMN_ADDEDTIME = 0x10000000 + 
ATTR_CMN_BKUPTIME = 0x2000 + ATTR_CMN_CHGTIME = 0x800 + ATTR_CMN_CRTIME = 0x200 + ATTR_CMN_DATA_PROTECT_FLAGS = 0x40000000 + ATTR_CMN_DEVID = 0x2 + ATTR_CMN_DOCUMENT_ID = 0x100000 + ATTR_CMN_ERROR = 0x20000000 + ATTR_CMN_EXTENDED_SECURITY = 0x400000 + ATTR_CMN_FILEID = 0x2000000 + ATTR_CMN_FLAGS = 0x40000 + ATTR_CMN_FNDRINFO = 0x4000 + ATTR_CMN_FSID = 0x4 + ATTR_CMN_FULLPATH = 0x8000000 + ATTR_CMN_GEN_COUNT = 0x80000 + ATTR_CMN_GRPID = 0x10000 + ATTR_CMN_GRPUUID = 0x1000000 + ATTR_CMN_MODTIME = 0x400 + ATTR_CMN_NAME = 0x1 + ATTR_CMN_NAMEDATTRCOUNT = 0x80000 + ATTR_CMN_NAMEDATTRLIST = 0x100000 + ATTR_CMN_OBJID = 0x20 + ATTR_CMN_OBJPERMANENTID = 0x40 + ATTR_CMN_OBJTAG = 0x10 + ATTR_CMN_OBJTYPE = 0x8 + ATTR_CMN_OWNERID = 0x8000 + ATTR_CMN_PARENTID = 0x4000000 + ATTR_CMN_PAROBJID = 0x80 + ATTR_CMN_RETURNED_ATTRS = 0x80000000 + ATTR_CMN_SCRIPT = 0x100 + ATTR_CMN_SETMASK = 0x51c7ff00 + ATTR_CMN_USERACCESS = 0x200000 + ATTR_CMN_UUID = 0x800000 + ATTR_CMN_VALIDMASK = 0xffffffff + ATTR_CMN_VOLSETMASK = 0x6700 + ATTR_FILE_ALLOCSIZE = 0x4 + ATTR_FILE_CLUMPSIZE = 0x10 + ATTR_FILE_DATAALLOCSIZE = 0x400 + ATTR_FILE_DATAEXTENTS = 0x800 + ATTR_FILE_DATALENGTH = 0x200 + ATTR_FILE_DEVTYPE = 0x20 + ATTR_FILE_FILETYPE = 0x40 + ATTR_FILE_FORKCOUNT = 0x80 + ATTR_FILE_FORKLIST = 0x100 + ATTR_FILE_IOBLOCKSIZE = 0x8 + ATTR_FILE_LINKCOUNT = 0x1 + ATTR_FILE_RSRCALLOCSIZE = 0x2000 + ATTR_FILE_RSRCEXTENTS = 0x4000 + ATTR_FILE_RSRCLENGTH = 0x1000 + ATTR_FILE_SETMASK = 0x20 + ATTR_FILE_TOTALSIZE = 0x2 + ATTR_FILE_VALIDMASK = 0x37ff + ATTR_VOL_ALLOCATIONCLUMP = 0x40 + ATTR_VOL_ATTRIBUTES = 0x40000000 + ATTR_VOL_CAPABILITIES = 0x20000 + ATTR_VOL_DIRCOUNT = 0x400 + ATTR_VOL_ENCODINGSUSED = 0x10000 + ATTR_VOL_FILECOUNT = 0x200 + ATTR_VOL_FSTYPE = 0x1 + ATTR_VOL_INFO = 0x80000000 + ATTR_VOL_IOBLOCKSIZE = 0x80 + ATTR_VOL_MAXOBJCOUNT = 0x800 + ATTR_VOL_MINALLOCATION = 0x20 + ATTR_VOL_MOUNTEDDEVICE = 0x8000 + ATTR_VOL_MOUNTFLAGS = 0x4000 + ATTR_VOL_MOUNTPOINT = 0x1000 + ATTR_VOL_NAME = 0x2000 + ATTR_VOL_OBJCOUNT = 0x100 + ATTR_VOL_QUOTA_SIZE = 0x10000000 + ATTR_VOL_RESERVED_SIZE = 0x20000000 + ATTR_VOL_SETMASK = 0x80002000 + ATTR_VOL_SIGNATURE = 0x2 + ATTR_VOL_SIZE = 0x4 + ATTR_VOL_SPACEAVAIL = 0x10 + ATTR_VOL_SPACEFREE = 0x8 + ATTR_VOL_SPACEUSED = 0x800000 + ATTR_VOL_UUID = 0x40000 + ATTR_VOL_VALIDMASK = 0xf087ffff + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc00c4279 + BIOCGETIF = 0x4020426b + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044272 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSEESENT = 0x40044276 + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDLT = 0x80044278 + BIOCSETF = 0x80104267 + BIOCSETFNR = 0x8010427e + BIOCSETIF = 0x8020426c + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044273 + BIOCSRTIMEOUT = 0x8010426d + BIOCSSEESENT = 0x80044277 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 
+ BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x80000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x8000 + BSDLY = 0x8000 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_MONOTONIC = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_MONOTONIC_RAW_APPROX = 0x5 + CLOCK_PROCESS_CPUTIME_ID = 0xc + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x10 + CLOCK_UPTIME_RAW = 0x8 + CLOCK_UPTIME_RAW_APPROX = 0x9 + CLONE_NOFOLLOW = 0x1 + CLONE_NOOWNERCOPY = 0x2 + CR0 = 0x0 + CR1 = 0x1000 + CR2 = 0x2000 + CR3 = 0x3000 + CRDLY = 0x3000 + CREAD = 0x800 + CRTSCTS = 0x30000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTLIOCGINFO = 0xc0644e03 + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CHDLC = 0x68 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DBUS = 0xe7 + DLT_DECT = 0xdd + DLT_DOCSIS = 0x8f + DLT_DVB_CI = 0xeb + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_HHDLC = 0x79 + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NOFCS = 0xe6 + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_IPFILTER = 0x74 + DLT_IPMB = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPNET = 0xe2 + DLT_IPOIB = 0xf2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_ATM_CEMIC = 0xee + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FIBRECHANNEL = 0xea + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_SRX_E2E = 0xe9 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_JUNIPER_VS = 0xe8 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + 
DLT_LINUX_PPP_WITHDIRECTION = 0xa6 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MATCHING_MAX = 0x10a + DLT_MATCHING_MIN = 0x68 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPEG_2_TS = 0xf3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_MUX27010 = 0xec + DLT_NETANALYZER = 0xf0 + DLT_NETANALYZER_TRANSPARENT = 0xf1 + DLT_NFC_LLCP = 0xf5 + DLT_NFLOG = 0xef + DLT_NG40 = 0xf4 + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PPP_WITH_DIRECTION = 0xa6 + DLT_PRISM_HEADER = 0x77 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RIO = 0x7c + DLT_SCCP = 0x8e + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_STANAG_5066_D_PDU = 0xed + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USB_DARWIN = 0x10a + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DLT_WIHART = 0xdf + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EVFILT_AIO = -0x3 + EVFILT_EXCEPT = -0xf + EVFILT_FS = -0x9 + EVFILT_MACHPORT = -0x8 + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0x11 + EVFILT_THREADMARKER = 0x11 + EVFILT_TIMER = -0x7 + EVFILT_USER = -0xa + EVFILT_VM = -0xc + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_DISPATCH2 = 0x180 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG0 = 0x1000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_OOBAND = 0x2000 + EV_POLL = 0x1000 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EV_UDATA_SPECIFIC = 0x100 + EV_VANISHED = 0x200 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FF1 = 0x4000 + FFDLY = 0x4000 + FLUSHO = 0x800000 + FSOPT_ATTR_CMN_EXTENDED = 0x20 + FSOPT_NOFOLLOW = 0x1 + FSOPT_NOINMEMUPDATE = 0x2 + FSOPT_PACK_INVAL_ATTRS = 0x8 + FSOPT_REPORT_FULLSIZE = 0x4 + FSOPT_RETURN_REALDEV = 0x200 + F_ADDFILESIGS = 0x3d + F_ADDFILESIGS_FOR_DYLD_SIM = 0x53 + F_ADDFILESIGS_INFO = 0x67 + F_ADDFILESIGS_RETURN = 0x61 + F_ADDFILESUPPL = 0x68 + F_ADDSIGS = 0x3b + F_ALLOCATEALL = 0x4 + F_ALLOCATECONTIG = 0x2 + F_BARRIERFSYNC = 0x55 + F_CHECK_LV = 0x62 + F_CHKCLEAN = 0x29 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x43 + F_FINDSIGS = 0x4e + F_FLUSH_DATA = 0x28 + F_FREEZE_FS = 0x35 + F_FULLFSYNC = 0x33 + F_GETCODEDIR = 0x48 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETLKPID = 0x42 + F_GETNOSIGPIPE = 0x4a + F_GETOWN = 0x5 + F_GETPATH = 0x32 + F_GETPATH_MTMINFO = 0x47 + F_GETPATH_NOFIRMLINK = 0x66 + F_GETPROTECTIONCLASS = 0x3f + F_GETPROTECTIONLEVEL = 0x4d + F_GETSIGSINFO = 0x69 + F_GLOBAL_NOCACHE = 0x37 + F_LOG2PHYS = 0x31 + F_LOG2PHYS_EXT = 0x41 + F_NOCACHE = 0x30 + F_NODIRECT = 0x3e + F_OK = 0x0 + F_PATHPKG_CHECK = 0x34 + F_PEOFPOSMODE = 0x3 + 
F_PREALLOCATE = 0x2a + F_PUNCHHOLE = 0x63 + F_RDADVISE = 0x2c + F_RDAHEAD = 0x2d + F_RDLCK = 0x1 + F_SETBACKINGSTORE = 0x46 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETLKWTIMEOUT = 0xa + F_SETNOSIGPIPE = 0x49 + F_SETOWN = 0x6 + F_SETPROTECTIONCLASS = 0x40 + F_SETSIZE = 0x2b + F_SINGLE_WRITER = 0x4c + F_SPECULATIVE_READ = 0x65 + F_THAW_FS = 0x36 + F_TRANSCODEKEY = 0x4b + F_TRIM_ACTIVE_FILE = 0x64 + F_UNLCK = 0x2 + F_VOLPOSMODE = 0x4 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFF_ALLMULTI = 0x200 + IFF_ALTPHYS = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NOTRAILERS = 0x20 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_6LOWPAN = 0x40 + IFT_AAL5 = 0x31 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ATM = 0x25 + IFT_BRIDGE = 0xd1 + IFT_CARP = 0xf8 + IFT_CELLULAR = 0xff + IFT_CEPT = 0x13 + IFT_DS3 = 0x1e + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_ETHER = 0x6 + IFT_FAITH = 0x38 + IFT_FDDI = 0xf + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_GIF = 0x37 + IFT_HDH1822 = 0x3 + IFT_HIPPI = 0x2f + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IEEE1394 = 0x90 + IFT_IEEE8023ADLAG = 0x88 + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88026 = 0xa + IFT_L2VLAN = 0x87 + IFT_LAPB = 0x10 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_NSIP = 0x1b + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PDP = 0xff + IFT_PFLOG = 0xf5 + IFT_PFSYNC = 0xf6 + IFT_PKTAP = 0xfe + IFT_PPP = 0x17 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PTPSERIAL = 0x16 + IFT_RS232 = 0x21 + IFT_SDLC = 0x11 + IFT_SIP = 0x1f + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_STARLAN = 0xb + IFT_STF = 0x39 + IFT_T1 = 0x12 + IFT_ULTRA = 0x1d + IFT_V35 = 0x2d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LINKLOCALNETNUM = 0xa9fe0000 + IN_LOOPBACKNET = 0x7f + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x400473d1 + IPPROTO_3PC = 0x22 + IPPROTO_ADFS = 0x44 + IPPROTO_AH = 0x33 + IPPROTO_AHIP = 0x3d + IPPROTO_APES = 0x63 + IPPROTO_ARGUS = 0xd + IPPROTO_AX25 = 0x5d + IPPROTO_BHA = 0x31 + IPPROTO_BLT = 0x1e + IPPROTO_BRSATMON = 0x4c + IPPROTO_CFTP = 0x3e + IPPROTO_CHAOS = 0x10 + IPPROTO_CMTP = 0x26 + IPPROTO_CPHB = 0x49 + IPPROTO_CPNX = 0x48 + IPPROTO_DDP = 0x25 + IPPROTO_DGP = 0x56 + IPPROTO_DIVERT = 0xfe + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EMCON = 0xe + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GMTP = 0x64 + IPPROTO_GRE = 0x2f 
+ IPPROTO_HELLO = 0x3f + IPPROTO_HMP = 0x14 + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IDPR = 0x23 + IPPROTO_IDRP = 0x2d + IPPROTO_IGMP = 0x2 + IPPROTO_IGP = 0x55 + IPPROTO_IGRP = 0x58 + IPPROTO_IL = 0x28 + IPPROTO_INLSP = 0x34 + IPPROTO_INP = 0x20 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPCV = 0x47 + IPPROTO_IPEIP = 0x5e + IPPROTO_IPIP = 0x4 + IPPROTO_IPPC = 0x43 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IRTP = 0x1c + IPPROTO_KRYPTOLAN = 0x41 + IPPROTO_LARP = 0x5b + IPPROTO_LEAF1 = 0x19 + IPPROTO_LEAF2 = 0x1a + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x34 + IPPROTO_MEAS = 0x13 + IPPROTO_MHRP = 0x30 + IPPROTO_MICP = 0x5f + IPPROTO_MTP = 0x5c + IPPROTO_MUX = 0x12 + IPPROTO_ND = 0x4d + IPPROTO_NHRP = 0x36 + IPPROTO_NONE = 0x3b + IPPROTO_NSP = 0x1f + IPPROTO_NVPII = 0xb + IPPROTO_OSPFIGP = 0x59 + IPPROTO_PGM = 0x71 + IPPROTO_PIGP = 0x9 + IPPROTO_PIM = 0x67 + IPPROTO_PRM = 0x15 + IPPROTO_PUP = 0xc + IPPROTO_PVP = 0x4b + IPPROTO_RAW = 0xff + IPPROTO_RCCMON = 0xa + IPPROTO_RDP = 0x1b + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_RVD = 0x42 + IPPROTO_SATEXPAK = 0x40 + IPPROTO_SATMON = 0x45 + IPPROTO_SCCSP = 0x60 + IPPROTO_SCTP = 0x84 + IPPROTO_SDRP = 0x2a + IPPROTO_SEP = 0x21 + IPPROTO_SRPC = 0x5a + IPPROTO_ST = 0x7 + IPPROTO_SVMTP = 0x52 + IPPROTO_SWIPE = 0x35 + IPPROTO_TCF = 0x57 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_TPXX = 0x27 + IPPROTO_TRUNK1 = 0x17 + IPPROTO_TRUNK2 = 0x18 + IPPROTO_TTP = 0x54 + IPPROTO_UDP = 0x11 + IPPROTO_VINES = 0x53 + IPPROTO_VISA = 0x46 + IPPROTO_VMTP = 0x51 + IPPROTO_WBEXPAK = 0x4f + IPPROTO_WBMON = 0x4e + IPPROTO_WSN = 0x4a + IPPROTO_XNET = 0xf + IPPROTO_XTP = 0x24 + IPV6_2292DSTOPTS = 0x17 + IPV6_2292HOPLIMIT = 0x14 + IPV6_2292HOPOPTS = 0x16 + IPV6_2292NEXTHOP = 0x15 + IPV6_2292PKTINFO = 0x13 + IPV6_2292PKTOPTIONS = 0x19 + IPV6_2292RTHDR = 0x18 + IPV6_3542DSTOPTS = 0x32 + IPV6_3542HOPLIMIT = 0x2f + IPV6_3542HOPOPTS = 0x31 + IPV6_3542NEXTHOP = 0x30 + IPV6_3542PKTINFO = 0x2e + IPV6_3542RTHDR = 0x33 + IPV6_ADDR_MC_FLAGS_PREFIX = 0x20 + IPV6_ADDR_MC_FLAGS_TRANSIENT = 0x10 + IPV6_ADDR_MC_FLAGS_UNICAST_BASED = 0x30 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_BINDV6ONLY = 0x1b + IPV6_BOUND_IF = 0x7d + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FLOW_ECN_MASK = 0x3000 + IPV6_FRAGTTL = 0x3c + IPV6_FW_ADD = 0x1e + IPV6_FW_DEL = 0x1f + IPV6_FW_FLUSH = 0x20 + IPV6_FW_GET = 0x22 + IPV6_FW_ZERO = 0x21 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXOPTHDR = 0x800 + IPV6_MAXPACKET = 0xffff + IPV6_MAX_GROUP_SRC_FILTER = 0x200 + IPV6_MAX_MEMBERSHIPS = 0xfff + IPV6_MAX_SOCK_SRC_FILTER = 0x80 + IPV6_MIN_MEMBERSHIPS = 0x1f + IPV6_MMTU = 0x500 + IPV6_MSFILTER = 0x4a + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_PREFER_TEMPADDR = 0x3f + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x3d + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x23 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 
0x39 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x24 + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_ADD_SOURCE_MEMBERSHIP = 0x46 + IP_BLOCK_SOURCE = 0x48 + IP_BOUND_IF = 0x19 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DONTFRAG = 0x1c + IP_DROP_MEMBERSHIP = 0xd + IP_DROP_SOURCE_MEMBERSHIP = 0x47 + IP_DUMMYNET_CONFIGURE = 0x3c + IP_DUMMYNET_DEL = 0x3d + IP_DUMMYNET_FLUSH = 0x3e + IP_DUMMYNET_GET = 0x40 + IP_FAITH = 0x16 + IP_FW_ADD = 0x28 + IP_FW_DEL = 0x29 + IP_FW_FLUSH = 0x2a + IP_FW_GET = 0x2c + IP_FW_RESETLOG = 0x2d + IP_FW_ZERO = 0x2b + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x15 + IP_MAXPACKET = 0xffff + IP_MAX_GROUP_SRC_FILTER = 0x200 + IP_MAX_MEMBERSHIPS = 0xfff + IP_MAX_SOCK_MUTE_FILTER = 0x80 + IP_MAX_SOCK_SRC_FILTER = 0x80 + IP_MF = 0x2000 + IP_MIN_MEMBERSHIPS = 0x1f + IP_MSFILTER = 0x4a + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_IFINDEX = 0x42 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_VIF = 0xe + IP_NAT__XXX = 0x37 + IP_OFFMASK = 0x1fff + IP_OLD_FW_ADD = 0x32 + IP_OLD_FW_DEL = 0x33 + IP_OLD_FW_FLUSH = 0x34 + IP_OLD_FW_GET = 0x36 + IP_OLD_FW_RESETLOG = 0x38 + IP_OLD_FW_ZERO = 0x35 + IP_OPTIONS = 0x1 + IP_PKTINFO = 0x1a + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVPKTINFO = 0x1a + IP_RECVRETOPTS = 0x6 + IP_RECVTOS = 0x1b + IP_RECVTTL = 0x18 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RSVP_OFF = 0x10 + IP_RSVP_ON = 0xf + IP_RSVP_VIF_OFF = 0x12 + IP_RSVP_VIF_ON = 0x11 + IP_STRIPHDR = 0x17 + IP_TOS = 0x3 + IP_TRAFFIC_MGT_BACKGROUND = 0x41 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x49 + ISIG = 0x80 + ISTRIP = 0x20 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LOCAL_PEERCRED = 0x1 + LOCAL_PEEREPID = 0x3 + LOCAL_PEEREUUID = 0x5 + LOCAL_PEERPID = 0x2 + LOCAL_PEERTOKEN = 0x6 + LOCAL_PEERUUID = 0x4 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_CAN_REUSE = 0x9 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_FREE_REUSABLE = 0x7 + MADV_FREE_REUSE = 0x8 + MADV_NORMAL = 0x0 + MADV_PAGEOUT = 0xa + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_WILLNEED = 0x3 + MADV_ZERO_WIRED_PAGES = 0x6 + MAP_32BIT = 0x8000 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_COPY = 0x2 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_HASSEMAPHORE = 0x200 + MAP_JIT = 0x800 + MAP_NOCACHE = 0x400 + MAP_NOEXTEND = 0x100 + MAP_NORESERVE = 0x40 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_RESERVED0080 = 0x80 + MAP_RESILIENT_CODESIGN = 0x2000 + MAP_RESILIENT_MEDIA = 0x4000 + MAP_SHARED = 0x1 + MAP_TRANSLATED_ALLOW_EXECUTE = 0x20000 + MAP_UNIX03 = 0x40000 + MCAST_BLOCK_SOURCE = 0x54 + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x50 + MCAST_JOIN_SOURCE_GROUP = 0x52 + MCAST_LEAVE_GROUP = 0x51 + MCAST_LEAVE_SOURCE_GROUP = 0x53 + MCAST_UNBLOCK_SOURCE = 0x55 + MCAST_UNDEFINED = 0x0 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MNT_ASYNC = 0x40 + MNT_AUTOMOUNTED = 0x400000 + MNT_CMDFLAGS = 0xf0000 + MNT_CPROTECT = 0x80 + MNT_DEFWRITE = 0x2000000 + MNT_DONTBROWSE = 0x100000 + MNT_DOVOLFS = 0x8000 + MNT_DWAIT = 0x4 + MNT_EXPORTED = 0x100 + MNT_EXT_ROOT_DATA_VOL = 0x1 + MNT_FORCE = 0x80000 + 
MNT_IGNORE_OWNERSHIP = 0x200000 + MNT_JOURNALED = 0x800000 + MNT_LOCAL = 0x1000 + MNT_MULTILABEL = 0x4000000 + MNT_NOATIME = 0x10000000 + MNT_NOBLOCK = 0x20000 + MNT_NODEV = 0x10 + MNT_NOEXEC = 0x4 + MNT_NOSUID = 0x8 + MNT_NOUSERXATTR = 0x1000000 + MNT_NOWAIT = 0x2 + MNT_QUARANTINE = 0x400 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_REMOVABLE = 0x200 + MNT_ROOTFS = 0x4000 + MNT_SNAPSHOT = 0x40000000 + MNT_STRICTATIME = 0x80000000 + MNT_SYNCHRONOUS = 0x2 + MNT_UNION = 0x20 + MNT_UNKNOWNPERMISSIONS = 0x200000 + MNT_UPDATE = 0x10000 + MNT_VISFLAGMASK = 0xd7f0f7ff + MNT_WAIT = 0x1 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOF = 0x100 + MSG_EOR = 0x8 + MSG_FLUSH = 0x400 + MSG_HAVEMORE = 0x2000 + MSG_HOLD = 0x800 + MSG_NEEDSA = 0x10000 + MSG_NOSIGNAL = 0x80000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_RCVMORE = 0x4000 + MSG_SEND = 0x1000 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MSG_WAITSTREAM = 0x200 + MS_ASYNC = 0x1 + MS_DEACTIVATE = 0x8 + MS_INVALIDATE = 0x2 + MS_KILLPAGES = 0x4 + MS_SYNC = 0x10 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_DUMP2 = 0x7 + NET_RT_FLAGS = 0x2 + NET_RT_FLAGS_PRIV = 0xa + NET_RT_IFLIST = 0x3 + NET_RT_IFLIST2 = 0x6 + NET_RT_MAXID = 0xb + NET_RT_STAT = 0x4 + NET_RT_TRASH = 0x5 + NFDBITS = 0x20 + NL0 = 0x0 + NL1 = 0x100 + NL2 = 0x200 + NL3 = 0x300 + NLDLY = 0x300 + NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 + NOTE_ABSOLUTE = 0x8 + NOTE_ATTRIB = 0x8 + NOTE_BACKGROUND = 0x40 + NOTE_CHILD = 0x4 + NOTE_CRITICAL = 0x20 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXITSTATUS = 0x4000000 + NOTE_EXIT_CSERROR = 0x40000 + NOTE_EXIT_DECRYPTFAIL = 0x10000 + NOTE_EXIT_DETAIL = 0x2000000 + NOTE_EXIT_DETAIL_MASK = 0x70000 + NOTE_EXIT_MEMORY = 0x20000 + NOTE_EXIT_REPARENTED = 0x80000 + NOTE_EXTEND = 0x4 + NOTE_FFAND = 0x40000000 + NOTE_FFCOPY = 0xc0000000 + NOTE_FFCTRLMASK = 0xc0000000 + NOTE_FFLAGSMASK = 0xffffff + NOTE_FFNOP = 0x0 + NOTE_FFOR = 0x80000000 + NOTE_FORK = 0x40000000 + NOTE_FUNLOCK = 0x100 + NOTE_LEEWAY = 0x10 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_MACHTIME = 0x100 + NOTE_MACH_CONTINUOUS_TIME = 0x80 + NOTE_NONE = 0x80 + NOTE_NSECONDS = 0x4 + NOTE_OOB = 0x2 + NOTE_PCTRLMASK = -0x100000 + NOTE_PDATAMASK = 0xfffff + NOTE_REAP = 0x10000000 + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_SECONDS = 0x1 + NOTE_SIGNAL = 0x8000000 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRIGGER = 0x1000000 + NOTE_USECONDS = 0x2 + NOTE_VM_ERROR = 0x10000000 + NOTE_VM_PRESSURE = 0x80000000 + NOTE_VM_PRESSURE_SUDDEN_TERMINATE = 0x20000000 + NOTE_VM_PRESSURE_TERMINATE = 0x40000000 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + OFDEL = 0x20000 + OFILL = 0x80 + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + OXTABS = 0x4 + O_ACCMODE = 0x3 + O_ALERT = 0x20000000 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x1000000 + O_CREAT = 0x200 + O_DIRECTORY = 0x100000 + O_DP_GETRAWENCRYPTED = 0x1 + O_DP_GETRAWUNENCRYPTED = 0x2 + O_DSYNC = 0x400000 + O_EVTONLY = 0x8000 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x20000 + O_NOFOLLOW = 0x100 + O_NOFOLLOW_ANY = 0x20000000 + O_NONBLOCK = 0x4 + O_POPUP = 0x80000000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_SHLOCK = 0x10 + O_SYMLINK = 0x200000 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PT_ATTACH = 0xa + 
PT_ATTACHEXC = 0xe + PT_CONTINUE = 0x7 + PT_DENY_ATTACH = 0x1f + PT_DETACH = 0xb + PT_FIRSTMACH = 0x20 + PT_FORCEQUOTA = 0x1e + PT_KILL = 0x8 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_READ_U = 0x3 + PT_SIGEXC = 0xc + PT_STEP = 0x9 + PT_THUPDATE = 0xd + PT_TRACE_ME = 0x0 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + PT_WRITE_U = 0x6 + RLIMIT_AS = 0x5 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_CPU_USAGE_MONITOR = 0x2 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 + RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x8 + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CLONING = 0x100 + RTF_CONDEMNED = 0x2000000 + RTF_DEAD = 0x20000000 + RTF_DELCLONE = 0x80 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_GATEWAY = 0x2 + RTF_GLOBAL = 0x40000000 + RTF_HOST = 0x4 + RTF_IFREF = 0x4000000 + RTF_IFSCOPE = 0x1000000 + RTF_LLDATA = 0x400 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MULTICAST = 0x800000 + RTF_NOIFREF = 0x2000 + RTF_PINNED = 0x100000 + RTF_PRCLONING = 0x10000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_PROXY = 0x8000000 + RTF_REJECT = 0x8 + RTF_ROUTER = 0x10000000 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_WASCLONED = 0x20000 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DELMADDR = 0x10 + RTM_GET = 0x4 + RTM_GET2 = 0x14 + RTM_IFINFO = 0xe + RTM_IFINFO2 = 0x12 + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_NEWMADDR = 0xf + RTM_NEWMADDR2 = 0x13 + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + SCM_CREDS = 0x3 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x2 + SCM_TIMESTAMP_MONOTONIC = 0x4 + SEEK_CUR = 0x1 + SEEK_DATA = 0x4 + SEEK_END = 0x2 + SEEK_HOLE = 0x3 + SEEK_SET = 0x0 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCARPIPLL = 0xc0206928 + SIOCATMARK = 0x40047307 + SIOCAUTOADDR = 0xc0206926 + SIOCAUTONETMASK = 0x80206927 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFPHYADDR = 0x80206941 + SIOCGDRVSPEC = 0xc028697b + SIOCGETVLAN = 0xc020697f + SIOCGHIWAT = 0x40047301 + SIOCGIF6LOWPAN = 0xc02069c5 + SIOCGIFADDR = 0xc0206921 + SIOCGIFALTMTU = 0xc0206948 + SIOCGIFASYNCMAP = 0xc020697c + SIOCGIFBOND = 0xc0206947 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCAP = 0xc020695b + SIOCGIFCONF = 0xc00c6924 + SIOCGIFDEVMTU = 0xc0206944 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFFUNCTIONALTYPE = 0xc02069ad + SIOCGIFGENERIC = 0xc020693a + SIOCGIFKPI = 0xc0206987 + SIOCGIFMAC = 0xc0206982 + SIOCGIFMEDIA = 0xc02c6938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc0206933 + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206940 + SIOCGIFPHYS = 0xc0206935 + SIOCGIFPSRCADDR = 0xc020693f + SIOCGIFSTATUS = 0xc331693d + SIOCGIFVLAN = 0xc020697f + SIOCGIFWAKEFLAGS = 0xc0206988 + SIOCGIFXMEDIA = 0xc02c6948 + 
SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCIFCREATE = 0xc0206978 + SIOCIFCREATE2 = 0xc020697a + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106981 + SIOCRSLVMULTI = 0xc010693b + SIOCSDRVSPEC = 0x8028697b + SIOCSETVLAN = 0x8020697e + SIOCSHIWAT = 0x80047300 + SIOCSIF6LOWPAN = 0x802069c4 + SIOCSIFADDR = 0x8020690c + SIOCSIFALTMTU = 0x80206945 + SIOCSIFASYNCMAP = 0x8020697d + SIOCSIFBOND = 0x80206946 + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFCAP = 0x8020695a + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGENERIC = 0x80206939 + SIOCSIFKPI = 0x80206986 + SIOCSIFLLADDR = 0x8020693c + SIOCSIFMAC = 0x80206983 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x80206934 + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x8040693e + SIOCSIFPHYS = 0x80206936 + SIOCSIFVLAN = 0x8020697e + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SOCK_DGRAM = 0x2 + SOCK_MAXADDRLEN = 0xff + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_LOCAL = 0x0 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_DONTTRUNC = 0x2000 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LABEL = 0x1010 + SO_LINGER = 0x80 + SO_LINGER_SEC = 0x1080 + SO_NETSVC_MARKING_LEVEL = 0x1119 + SO_NET_SERVICE_TYPE = 0x1116 + SO_NKE = 0x1021 + SO_NOADDRERR = 0x1023 + SO_NOSIGPIPE = 0x1022 + SO_NOTIFYCONFLICT = 0x1026 + SO_NP_EXTENSIONS = 0x1083 + SO_NREAD = 0x1020 + SO_NUMRCVPKT = 0x1112 + SO_NWRITE = 0x1024 + SO_OOBINLINE = 0x100 + SO_PEERLABEL = 0x1011 + SO_RANDOMPORT = 0x1082 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_REUSESHAREUID = 0x1025 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMP = 0x400 + SO_TIMESTAMP_MONOTONIC = 0x800 + SO_TRACKER_ATTRIBUTE_FLAGS_APP_APPROVED = 0x1 + SO_TRACKER_ATTRIBUTE_FLAGS_DOMAIN_SHORT = 0x4 + SO_TRACKER_ATTRIBUTE_FLAGS_TRACKER = 0x2 + SO_TRACKER_TRANSPARENCY_VERSION = 0x3 + SO_TYPE = 0x1008 + SO_UPCALLCLOSEWAIT = 0x1027 + SO_USELOOPBACK = 0x40 + SO_WANTMORE = 0x4000 + SO_WANTOOBFLAG = 0x8000 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IFWHT = 0xe000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB1 = 0x400 + TAB2 = 0x800 + TAB3 = 0x4 + TABDLY = 0xc04 + TCIFLUSH = 0x1 + TCIOFF = 0x3 + TCIOFLUSH = 0x3 + TCION = 0x4 + TCOFLUSH = 0x2 + TCOOFF = 0x1 + TCOON = 0x2 + TCPOPT_CC = 0xb + TCPOPT_CCECHO = 0xd + TCPOPT_CCNEW = 0xc + TCPOPT_EOL = 0x0 + TCPOPT_FASTOPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 + TCP_CONNECTIONTIMEOUT = 0x20 + TCP_CONNECTION_INFO = 0x106 + TCP_ENABLE_ECN = 0x104 + TCP_FASTOPEN = 0x105 + TCP_KEEPALIVE = 0x10 + TCP_KEEPCNT = 0x102 + TCP_KEEPINTVL = 0x101 + TCP_MAXHLEN = 0x3c + TCP_MAXOLEN = 0x28 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x4 + TCP_MAX_WINSHIFT = 0xe + TCP_MINMSS = 0xd8 + 
TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOOPT = 0x8 + TCP_NOPUSH = 0x4 + TCP_NOTSENT_LOWAT = 0x201 + TCP_RXT_CONNDROPTIME = 0x80 + TCP_RXT_FINDROP = 0x100 + TCP_SENDMOREACKS = 0x103 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDCDTIMESTAMP = 0x40107458 + TIOCDRAIN = 0x2000745e + TIOCDSIMICROCODE = 0x20007455 + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLUSH = 0x80047410 + TIOCGDRAINWAIT = 0x40047456 + TIOCGETA = 0x40487413 + TIOCGETD = 0x4004741a + TIOCGPGRP = 0x40047477 + TIOCGWINSZ = 0x40087468 + TIOCIXOFF = 0x20007480 + TIOCIXON = 0x20007481 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGDTRWAIT = 0x4004745a + TIOCMGET = 0x4004746a + TIOCMODG = 0x40047403 + TIOCMODS = 0x80047404 + TIOCMSDTRWAIT = 0x8004745b + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTYGNAME = 0x40807453 + TIOCPTYGRANT = 0x20007454 + TIOCPTYUNLK = 0x20007452 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCONS = 0x20007463 + TIOCSCTTY = 0x20007461 + TIOCSDRAINWAIT = 0x80047457 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x80487414 + TIOCSETAF = 0x80487416 + TIOCSETAW = 0x80487415 + TIOCSETD = 0x8004741b + TIOCSIG = 0x2000745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCTIMESTAMP = 0x40107459 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMADDR_CID_ANY = 0xffffffff + VMADDR_CID_HOST = 0x2 + VMADDR_CID_HYPERVISOR = 0x0 + VMADDR_CID_RESERVED = 0x1 + VMADDR_PORT_ANY = 0xffffffff + VMIN = 0x10 + VM_LOADAVG = 0x2 + VM_MACHFACTOR = 0x4 + VM_MAXID = 0x6 + VM_METER = 0x1 + VM_SWAPUSAGE = 0x5 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VT0 = 0x0 + VT1 = 0x10000 + VTDLY = 0x10000 + VTIME = 0x11 + VWERASE = 0x4 + WCONTINUED = 0x10 + WCOREFLAG = 0x80 + WEXITED = 0x4 + WNOHANG = 0x1 + WNOWAIT = 0x20 + WORDSIZE = 0x40 + WSTOPPED = 0x8 + WUNTRACED = 0x2 + XATTR_CREATE = 0x2 + XATTR_NODEFAULT = 0x10 + XATTR_NOFOLLOW = 0x1 + XATTR_NOSECURITY = 0x8 + XATTR_REPLACE = 0x4 + XATTR_SHOWCOMPRESSION = 0x20 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index b959fe1957..d175aae896 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1,4 +1,4 @@ -// Code generated by mkmerge.go; DO NOT EDIT. +// Code generated by mkmerge; DO NOT EDIT. 
//go:build linux // +build linux @@ -116,6 +116,7 @@ const ( ARPHRD_LAPB = 0x204 ARPHRD_LOCALTLK = 0x305 ARPHRD_LOOPBACK = 0x304 + ARPHRD_MCTP = 0x122 ARPHRD_METRICOM = 0x17 ARPHRD_NETLINK = 0x338 ARPHRD_NETROM = 0x0 @@ -472,6 +473,7 @@ const ( DM_DEV_WAIT = 0xc138fd08 DM_DIR = "mapper" DM_GET_TARGET_VERSION = 0xc138fd11 + DM_IMA_MEASUREMENT_FLAG = 0x80000 DM_INACTIVE_PRESENT_FLAG = 0x40 DM_INTERNAL_SUSPEND_FLAG = 0x40000 DM_IOCTL = 0xfd @@ -716,6 +718,7 @@ const ( ETH_P_LOOPBACK = 0x9000 ETH_P_MACSEC = 0x88e5 ETH_P_MAP = 0xf9 + ETH_P_MCTP = 0xfa ETH_P_MOBITEX = 0x15 ETH_P_MPLS_MC = 0x8848 ETH_P_MPLS_UC = 0x8847 @@ -751,6 +754,21 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + EV_ABS = 0x3 + EV_CNT = 0x20 + EV_FF = 0x15 + EV_FF_STATUS = 0x17 + EV_KEY = 0x1 + EV_LED = 0x11 + EV_MAX = 0x1f + EV_MSC = 0x4 + EV_PWR = 0x16 + EV_REL = 0x2 + EV_REP = 0x14 + EV_SND = 0x12 + EV_SW = 0x5 + EV_SYN = 0x0 + EV_VERSION = 0x10001 EXABYTE_ENABLE_NEST = 0xf0 EXT2_SUPER_MAGIC = 0xef53 EXT3_SUPER_MAGIC = 0xef53 @@ -789,9 +807,11 @@ const ( FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 + FAN_EPIDFD = -0x2 FAN_EVENT_INFO_TYPE_DFID = 0x3 FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2 FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_INFO_TYPE_PIDFD = 0x4 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -811,6 +831,7 @@ const ( FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 + FAN_NOPIDFD = -0x1 FAN_ONDIR = 0x40000000 FAN_OPEN = 0x20 FAN_OPEN_EXEC = 0x1000 @@ -821,6 +842,7 @@ const ( FAN_REPORT_DIR_FID = 0x400 FAN_REPORT_FID = 0x200 FAN_REPORT_NAME = 0x800 + FAN_REPORT_PIDFD = 0x80 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1397,6 +1419,8 @@ const ( MADV_NOHUGEPAGE = 0xf MADV_NORMAL = 0x0 MADV_PAGEOUT = 0x15 + MADV_POPULATE_READ = 0x16 + MADV_POPULATE_WRITE = 0x17 MADV_RANDOM = 0x1 MADV_REMOVE = 0x9 MADV_SEQUENTIAL = 0x2 @@ -1452,6 +1476,18 @@ const ( MNT_FORCE = 0x1 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 + MOUNT_ATTR_IDMAP = 0x100000 + MOUNT_ATTR_NOATIME = 0x10 + MOUNT_ATTR_NODEV = 0x4 + MOUNT_ATTR_NODIRATIME = 0x80 + MOUNT_ATTR_NOEXEC = 0x8 + MOUNT_ATTR_NOSUID = 0x2 + MOUNT_ATTR_NOSYMFOLLOW = 0x200000 + MOUNT_ATTR_RDONLY = 0x1 + MOUNT_ATTR_RELATIME = 0x0 + MOUNT_ATTR_SIZE_VER0 = 0x20 + MOUNT_ATTR_STRICTATIME = 0x20 + MOUNT_ATTR__ATIME = 0x70 MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 @@ -1995,6 +2031,7 @@ const ( PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 + PR_SPEC_L1D_FLUSH = 0x2 PR_SPEC_NOT_AFFECTED = 0x0 PR_SPEC_PRCTL = 0x1 PR_SPEC_STORE_BYPASS = 0x0 @@ -2430,12 +2467,15 @@ const ( SMART_WRITE_THRESHOLDS = 0xd7 SMB_SUPER_MAGIC = 0x517b SOCKFS_MAGIC = 0x534f434b + SOCK_BUF_LOCK_MASK = 0x3 SOCK_DCCP = 0x6 SOCK_IOC_TYPE = 0x89 SOCK_PACKET = 0xa SOCK_RAW = 0x3 + SOCK_RCVBUF_LOCK = 0x2 SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 + SOCK_SNDBUF_LOCK = 0x1 SOL_AAL = 0x109 SOL_ALG = 0x117 SOL_ATM = 0x108 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 697811a460..3ca40ca7f0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -5,7 +5,7 @@ // +build 386,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/unix/_const.go package unix @@ -293,6 +293,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 7d8d93bfc4..ead332091a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -5,7 +5,7 @@ // +build amd64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/unix/_const.go package unix @@ -294,6 +294,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index f707d50894..39bdc94558 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -5,7 +5,7 @@ // +build arm,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -300,6 +300,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 3a67a9c852..9aec987db1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -5,7 +5,7 @@ // +build arm64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/_const.go package unix @@ -290,6 +290,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index a7ccef56c5..a8bba9491e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -5,7 +5,7 @@ // +build mips,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -293,6 +293,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index f7b7cec910..ee9e7e2020 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -5,7 +5,7 @@ // +build mips64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -293,6 +293,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 4fcacf9584..ba4b288a3c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -5,7 +5,7 @@ // +build mips64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -293,6 +293,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 6f6c223a2c..bc93afc367 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -5,7 +5,7 @@ // +build mipsle,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -293,6 +293,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 59e522bcf4..9295e69478 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -5,7 +5,7 @@ // +build ppc,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -348,6 +348,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index d4264a0f73..1fa081c9a6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -5,7 +5,7 @@ // +build ppc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -352,6 +352,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 21cbec1dd3..74b3211494 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -5,7 +5,7 @@ // +build ppc64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -352,6 +352,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 9b05bf12fc..c91c8ac5b0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -5,7 +5,7 @@ // +build riscv64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -281,6 +281,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index bd82ace09a..b66bf22289 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -5,7 +5,7 @@ // +build s390x,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/_const.go package unix @@ -356,6 +356,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 1f8bded56b..f7fb149b0c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -5,7 +5,7 @@ // +build sparc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -347,6 +347,7 @@ const ( SO_BPF_EXTENSIONS = 0x32 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0x400 + SO_BUF_LOCK = 0x51 SO_BUSY_POLL = 0x30 SO_BUSY_POLL_BUDGET = 0x49 SO_CNX_ADVICE = 0x37 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index 91a23cc728..85e0cc3866 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -17,6 +17,7 @@ int getdirent(int, uintptr_t, size_t); int wait4(int, uintptr_t, int, uintptr_t); int ioctl(int, int, uintptr_t); int fcntl(uintptr_t, int, uintptr_t); +int fsync_range(int, int, long long, long long); int acct(uintptr_t); int chdir(uintptr_t); int chroot(uintptr_t); @@ -29,7 +30,6 @@ int fchmod(int, unsigned int); int fchmodat(int, uintptr_t, unsigned int, int); int fchownat(int, uintptr_t, int, int, int); int fdatasync(int); -int fsync(int); int getpgid(int); int getpgrp(); int getpid(); @@ -255,6 +255,16 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fsyncRange(fd int, how int, start int64, length int64) (err error) { + r0, er := C.fsync_range(C.int(fd), C.int(how), C.longlong(start), C.longlong(length)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Acct(path string) (err error) { _p0 := uintptr(unsafe.Pointer(C.CString(path))) r0, er := C.acct(C.uintptr_t(_p0)) @@ -379,16 +389,6 @@ func Fdatasync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fsync(fd int) (err error) { - r0, er := C.fsync(C.int(fd)) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getpgid(pid int) (pgid int, err error) { r0, er := C.getpgid(C.int(pid)) pgid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index 33c2609b8b..f1d4a73b08 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -135,6 +135,16 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fsyncRange(fd int, how int, start int64, length int64) (err error) { + _, e1 := callfsync_range(fd, how, start, length) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Acct(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -283,16 +293,6 @@ func Fdatasync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fsync(fd int) (err error) { - _, e1 := callfsync(fd) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getpgid(pid int) (pgid int, err error) { r0, e1 := callgetpgid(pid) pgid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go index 8b737fa971..2caa5adf95 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go @@ -18,6 +18,7 @@ import ( //go:cgo_import_dynamic 
libc_wait4 wait4 "libc.a/shr_64.o" //go:cgo_import_dynamic libc_ioctl ioctl "libc.a/shr_64.o" //go:cgo_import_dynamic libc_fcntl fcntl "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_fsync_range fsync_range "libc.a/shr_64.o" //go:cgo_import_dynamic libc_acct acct "libc.a/shr_64.o" //go:cgo_import_dynamic libc_chdir chdir "libc.a/shr_64.o" //go:cgo_import_dynamic libc_chroot chroot "libc.a/shr_64.o" @@ -30,7 +31,6 @@ import ( //go:cgo_import_dynamic libc_fchmodat fchmodat "libc.a/shr_64.o" //go:cgo_import_dynamic libc_fchownat fchownat "libc.a/shr_64.o" //go:cgo_import_dynamic libc_fdatasync fdatasync "libc.a/shr_64.o" -//go:cgo_import_dynamic libc_fsync fsync "libc.a/shr_64.o" //go:cgo_import_dynamic libc_getpgid getpgid "libc.a/shr_64.o" //go:cgo_import_dynamic libc_getpgrp getpgrp "libc.a/shr_64.o" //go:cgo_import_dynamic libc_getpid getpid "libc.a/shr_64.o" @@ -136,6 +136,7 @@ import ( //go:linkname libc_wait4 libc_wait4 //go:linkname libc_ioctl libc_ioctl //go:linkname libc_fcntl libc_fcntl +//go:linkname libc_fsync_range libc_fsync_range //go:linkname libc_acct libc_acct //go:linkname libc_chdir libc_chdir //go:linkname libc_chroot libc_chroot @@ -148,7 +149,6 @@ import ( //go:linkname libc_fchmodat libc_fchmodat //go:linkname libc_fchownat libc_fchownat //go:linkname libc_fdatasync libc_fdatasync -//go:linkname libc_fsync libc_fsync //go:linkname libc_getpgid libc_getpgid //go:linkname libc_getpgrp libc_getpgrp //go:linkname libc_getpid libc_getpid @@ -257,6 +257,7 @@ var ( libc_wait4, libc_ioctl, libc_fcntl, + libc_fsync_range, libc_acct, libc_chdir, libc_chroot, @@ -269,7 +270,6 @@ var ( libc_fchmodat, libc_fchownat, libc_fdatasync, - libc_fsync, libc_getpgid, libc_getpgrp, libc_getpid, @@ -430,6 +430,13 @@ func callfcntl(fd uintptr, cmd int, arg uintptr) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func callfsync_range(fd int, how int, start int64, length int64) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_fsync_range)), 4, uintptr(fd), uintptr(how), uintptr(start), uintptr(length), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func callacct(_p0 uintptr) (r1 uintptr, e1 Errno) { r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_acct)), 1, _p0, 0, 0, 0, 0, 0) return @@ -514,13 +521,6 @@ func callfdatasync(fd int) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callfsync(fd int) (r1 uintptr, e1 Errno) { - r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_fsync)), 1, uintptr(fd), 0, 0, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func callgetpgid(pid int) (r1 uintptr, e1 Errno) { r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_getpgid)), 1, uintptr(pid), 0, 0, 0, 0, 0) return diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go index 3c260917ed..944a714b1a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go @@ -16,6 +16,7 @@ int getdirent(int, uintptr_t, size_t); int wait4(int, uintptr_t, int, uintptr_t); int ioctl(int, int, uintptr_t); int fcntl(uintptr_t, int, uintptr_t); +int fsync_range(int, int, long long, long long); int acct(uintptr_t); int chdir(uintptr_t); int chroot(uintptr_t); @@ -28,7 +29,6 @@ int fchmod(int, unsigned int); int fchmodat(int, uintptr_t, unsigned int, int); int fchownat(int, uintptr_t, 
int, int, int); int fdatasync(int); -int fsync(int); int getpgid(int); int getpgrp(); int getpid(); @@ -199,6 +199,14 @@ func callfcntl(fd uintptr, cmd int, arg uintptr) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func callfsync_range(fd int, how int, start int64, length int64) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.fsync_range(C.int(fd), C.int(how), C.longlong(start), C.longlong(length))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func callacct(_p0 uintptr) (r1 uintptr, e1 Errno) { r1 = uintptr(C.acct(C.uintptr_t(_p0))) e1 = syscall.GetErrno() @@ -295,14 +303,6 @@ func callfdatasync(fd int) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callfsync(fd int) (r1 uintptr, e1 Errno) { - r1 = uintptr(C.fsync(C.int(fd))) - e1 = syscall.GetErrno() - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func callgetpgid(pid int) (r1 uintptr, e1 Errno) { r1 = uintptr(C.getpgid(C.int(pid))) e1 = syscall.GetErrno() diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index d4efe8d457..0ae0ed4cb8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -734,6 +734,65 @@ var libc_sendfile_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func shmat(id int, addr uintptr, flag int) (ret uintptr, err error) { + r0, _, e1 := syscall_syscall(libc_shmat_trampoline_addr, uintptr(id), uintptr(addr), uintptr(flag)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_shmat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shmat shmat "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) { + r0, _, e1 := syscall_syscall(libc_shmctl_trampoline_addr, uintptr(id), uintptr(cmd), uintptr(unsafe.Pointer(buf))) + result = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_shmctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shmctl shmctl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmdt(addr uintptr) (err error) { + _, _, e1 := syscall_syscall(libc_shmdt_trampoline_addr, uintptr(addr), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_shmdt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shmdt shmdt "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmget(key int, size int, flag int) (id int, err error) { + r0, _, e1 := syscall_syscall(libc_shmget_trampoline_addr, uintptr(key), uintptr(size), uintptr(flag)) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_shmget_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shmget shmget "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index bc169c2ab9..eac6ca806f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ 
-264,6 +264,30 @@ TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendfile_trampoline_addr(SB)/8, $libc_sendfile_trampoline<>(SB) +TEXT libc_shmat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shmat(SB) + +GLOBL ·libc_shmat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shmat_trampoline_addr(SB)/8, $libc_shmat_trampoline<>(SB) + +TEXT libc_shmctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shmctl(SB) + +GLOBL ·libc_shmctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shmctl_trampoline_addr(SB)/8, $libc_shmctl_trampoline<>(SB) + +TEXT libc_shmdt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shmdt(SB) + +GLOBL ·libc_shmdt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shmdt_trampoline_addr(SB)/8, $libc_shmdt_trampoline<>(SB) + +TEXT libc_shmget_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shmget(SB) + +GLOBL ·libc_shmget_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shmget_trampoline_addr(SB)/8, $libc_shmget_trampoline<>(SB) + TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_access(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index f2ee2bd33b..cf71be3edb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -734,6 +734,65 @@ var libc_sendfile_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func shmat(id int, addr uintptr, flag int) (ret uintptr, err error) { + r0, _, e1 := syscall_syscall(libc_shmat_trampoline_addr, uintptr(id), uintptr(addr), uintptr(flag)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_shmat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shmat shmat "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) { + r0, _, e1 := syscall_syscall(libc_shmctl_trampoline_addr, uintptr(id), uintptr(cmd), uintptr(unsafe.Pointer(buf))) + result = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_shmctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shmctl shmctl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmdt(addr uintptr) (err error) { + _, _, e1 := syscall_syscall(libc_shmdt_trampoline_addr, uintptr(addr), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_shmdt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shmdt shmdt "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmget(key int, size int, flag int) (id int, err error) { + r0, _, e1 := syscall_syscall(libc_shmget_trampoline_addr, uintptr(key), uintptr(size), uintptr(flag)) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_shmget_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shmget shmget "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 33e19776db..4ebcf21758 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -264,6 +264,30 @@ TEXT 
libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendfile_trampoline_addr(SB)/8, $libc_sendfile_trampoline<>(SB) +TEXT libc_shmat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shmat(SB) + +GLOBL ·libc_shmat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shmat_trampoline_addr(SB)/8, $libc_shmat_trampoline<>(SB) + +TEXT libc_shmctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shmctl(SB) + +GLOBL ·libc_shmctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shmctl_trampoline_addr(SB)/8, $libc_shmctl_trampoline<>(SB) + +TEXT libc_shmdt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shmdt(SB) + +GLOBL ·libc_shmdt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shmdt_trampoline_addr(SB)/8, $libc_shmdt_trampoline<>(SB) + +TEXT libc_shmget_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shmget(SB) + +GLOBL ·libc_shmget_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shmget_trampoline_addr(SB)/8, $libc_shmget_trampoline<>(SB) + TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_access(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 701f7eb88f..93edda4c49 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1,4 +1,4 @@ -// Code generated by mkmerge.go; DO NOT EDIT. +// Code generated by mkmerge; DO NOT EDIT. //go:build linux // +build linux @@ -409,6 +409,21 @@ func mount(source string, target string, fstype string, flags uintptr, data *byt // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func mountSetattr(dirfd int, pathname string, flags uint, attr *MountAttr, size uintptr) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOUNT_SETATTR, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(unsafe.Pointer(attr)), uintptr(size), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Acct(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1974,3 +1989,46 @@ func PidfdGetfd(pidfd int, targetfd int, flags int) (fd int, err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmat(id int, addr uintptr, flag int) (ret uintptr, err error) { + r0, _, e1 := Syscall(SYS_SHMAT, uintptr(id), uintptr(addr), uintptr(flag)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) { + r0, _, e1 := Syscall(SYS_SHMCTL, uintptr(id), uintptr(cmd), uintptr(unsafe.Pointer(buf))) + result = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmdt(addr uintptr) (err error) { + _, _, e1 := Syscall(SYS_SHMDT, uintptr(addr), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmget(key int, size int, flag int) (id int, err error) { + r0, _, e1 := Syscall(SYS_SHMGET, uintptr(key), uintptr(size), uintptr(flag)) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index f32cec9956..ff90c81e73 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ 
b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -46,16 +46,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func dup2(oldfd int, newfd int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -534,14 +524,3 @@ func utimes(path string, times *[2]Timeval) (err error) { } return } - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index c9bf2fe271..fa7d3dbe4e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -46,16 +46,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func dup2(oldfd int, newfd int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -689,17 +679,6 @@ func utimes(path string, times *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(cmdline) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 39cca7140e..654f91530f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -225,16 +225,6 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func dup2(oldfd int, newfd int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -649,17 +639,6 @@ func setrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT - func armSyncFileRange(fd int, flags int, off int64, n int64) (err error) { _, _, e1 := Syscall6(SYS_ARM_SYNC_FILE_RANGE, uintptr(fd), uintptr(flags), uintptr(off), uintptr(off>>32), uintptr(n), uintptr(n>>32)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 1bf9898265..6d15528853 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -46,16 +46,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func dup2(oldfd int, newfd int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -712,14 +702,3 @@ func setrlimit(resource int, rlim *rlimit32) (err error) { } return } - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index f7d253f8fd..1e20d72df2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -46,16 +46,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func dup2(oldfd int, newfd int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -706,14 +696,3 @@ func stat(path string, st *stat_t) (err error) { } return } - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index f5799bf521..82b5e2d9ed 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -46,16 +46,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func dup2(oldfd int, newfd int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -706,14 +696,3 @@ func stat(path string, st *stat_t) (err error) { } return } - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func poll(fds *PollFd, nfds 
int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 199e4ef4cd..a0440c1d43 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -46,16 +46,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func dup2(oldfd int, newfd int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -712,14 +702,3 @@ func setrlimit(resource int, rlim *rlimit32) (err error) { } return } - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go index daff312f16..5864b9ca64 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go @@ -46,16 +46,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func dup2(oldfd int, newfd int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -695,17 +685,6 @@ func setrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func syncFileRange2(fd int, flags int, off int64, n int64) (err error) { _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE2, uintptr(fd), uintptr(flags), uintptr(off>>32), uintptr(off), uintptr(n>>32), uintptr(n)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index aa164b2fb0..beeb49e342 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -46,16 +46,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func dup2(oldfd int, newfd int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollWait(epfd int, events []EpollEvent, msec int) (n int, 
err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -741,17 +731,6 @@ func utimes(path string, times *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func syncFileRange2(fd int, flags int, off int64, n int64) (err error) { _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE2, uintptr(fd), uintptr(flags), uintptr(off), uintptr(n), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index f289eac7bb..53139b82c7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -46,16 +46,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func dup2(oldfd int, newfd int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -741,17 +731,6 @@ func utimes(path string, times *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func syncFileRange2(fd int, flags int, off int64, n int64) (err error) { _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE2, uintptr(fd), uintptr(flags), uintptr(off), uintptr(n), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index d7bf4b10a9..202add37d1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -46,16 +46,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func dup2(oldfd int, newfd int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -531,17 +521,6 @@ func utimes(path string, times *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(cmdline) diff --git 
a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index 8329e914d3..2ab268c343 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -73,16 +73,6 @@ func Fadvise(fd int, offset int64, length int64, advice int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func dup2(oldfd int, newfd int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -707,14 +697,3 @@ func utimes(path string, times *[2]Timeval) (err error) { } return } - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index aa7ce85d15..31847d2305 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -444,4 +444,5 @@ const ( SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_MEMFD_SECRET = 447 + SYS_PROCESS_MRELEASE = 448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index b830326386..3503cbbde3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -366,4 +366,5 @@ const ( SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_MEMFD_SECRET = 447 + SYS_PROCESS_MRELEASE = 448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index d75f65a0aa..5ecd24bf68 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -7,6 +7,7 @@ package unix const ( + SYS_SYSCALL_MASK = 0 SYS_RESTART_SYSCALL = 0 SYS_EXIT = 1 SYS_FORK = 2 @@ -407,4 +408,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 444 SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 8b02f09e9b..7e5c94cc7f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -311,4 +311,5 @@ const ( SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_MEMFD_SECRET = 447 + SYS_PROCESS_MRELEASE = 448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 026695abb1..e1e2a2bf59 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -428,4 +428,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 4444 SYS_LANDLOCK_ADD_RULE = 4445 SYS_LANDLOCK_RESTRICT_SELF = 4446 + SYS_PROCESS_MRELEASE = 4448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 7320ba9583..7651915a3a 100644 --- 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -358,4 +358,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 5444 SYS_LANDLOCK_ADD_RULE = 5445 SYS_LANDLOCK_RESTRICT_SELF = 5446 + SYS_PROCESS_MRELEASE = 5448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 45082dd67f..a26a2c050b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -358,4 +358,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 5444 SYS_LANDLOCK_ADD_RULE = 5445 SYS_LANDLOCK_RESTRICT_SELF = 5446 + SYS_PROCESS_MRELEASE = 5448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 570a857a56..fda9a6a991 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -428,4 +428,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 4444 SYS_LANDLOCK_ADD_RULE = 4445 SYS_LANDLOCK_RESTRICT_SELF = 4446 + SYS_PROCESS_MRELEASE = 4448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 638498d62e..e8496150d4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -435,4 +435,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 444 SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 702beebfef..5ee0678a36 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -407,4 +407,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 444 SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index bfc87ea444..29c0f9a39e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -407,4 +407,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 444 SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index a390e147d3..5c9a9a3b61 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -309,4 +309,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 444 SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 3e791e6cd2..913f50f98b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -372,4 +372,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 444 SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 78802a5cf7..0de03a7227 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -386,4 +386,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 444 SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 4c8dc0ba2e..885842c0eb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -209,6 +209,92 @@ type RawSockaddrCtl struct { Sc_reserved [5]uint32 } +type RawSockaddrVM struct { + Len uint8 + Family uint8 + Reserved1 uint16 + Port uint32 + Cid uint32 +} + +type XVSockPCB struct { + Xv_len uint32 + Xv_vsockpp uint64 + Xvp_local_cid uint32 + Xvp_local_port uint32 + Xvp_remote_cid uint32 + Xvp_remote_port uint32 + Xvp_rxcnt uint32 + Xvp_txcnt uint32 + Xvp_peer_rxhiwat uint32 + Xvp_peer_rxcnt uint32 + Xvp_last_pid int32 + Xvp_gencnt uint64 + Xv_socket XSocket + _ [4]byte +} + +type XSocket struct { + Xso_len uint32 + Xso_so uint32 + So_type int16 + So_options int16 + So_linger int16 + So_state int16 + So_pcb uint32 + Xso_protocol int32 + Xso_family int32 + So_qlen int16 + So_incqlen int16 + So_qlimit int16 + So_timeo int16 + So_error uint16 + So_pgid int32 + So_oobmark uint32 + So_rcv XSockbuf + So_snd XSockbuf + So_uid uint32 +} + +type XSocket64 struct { + Xso_len uint32 + _ [8]byte + So_type int16 + So_options int16 + So_linger int16 + So_state int16 + _ [8]byte + Xso_protocol int32 + Xso_family int32 + So_qlen int16 + So_incqlen int16 + So_qlimit int16 + So_timeo int16 + So_error uint16 + So_pgid int32 + So_oobmark uint32 + So_rcv XSockbuf + So_snd XSockbuf + So_uid uint32 +} + +type XSockbuf struct { + Cc uint32 + Hiwat uint32 + Mbcnt uint32 + Mbmax uint32 + Lowat int32 + Flags int16 + Timeo int16 +} + +type XVSockPgen struct { + Len uint32 + Count uint64 + Gen uint64 + Sogen uint64 +} + type _Socklen uint32 type Xucred struct { @@ -287,6 +373,11 @@ const ( SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x14 SizeofSockaddrCtl = 0x20 + SizeofSockaddrVM = 0xc + SizeofXvsockpcb = 0xa8 + SizeofXSocket = 0x64 + SizeofXSockbuf = 0x18 + SizeofXVSockPgen = 0x20 SizeofXucred = 0x4c SizeofLinger = 0x8 SizeofIovec = 0x10 @@ -550,13 +641,13 @@ type Eproc struct { Tdev int32 Tpgid int32 Tsess uintptr - Wmesg [8]int8 + Wmesg [8]byte Xsize int32 Xrssize int16 Xccount int16 Xswrss int16 Flag int32 - Login [12]int8 + Login [12]byte Spare [4]int32 _ [4]byte } @@ -597,7 +688,7 @@ type ExternProc struct { P_priority uint8 P_usrpri uint8 P_nice int8 - P_comm [17]int8 + P_comm [17]byte P_pgrp uintptr P_addr uintptr P_xstat uint16 @@ -639,3 +730,39 @@ type Ucred struct { Ngroups int16 Groups [16]uint32 } + +type SysvIpcPerm struct { + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint16 + _ uint16 + _ int32 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint64 + Lpid int32 + Cpid int32 + Nattch uint16 + _ [34]byte +} + +const ( + IPC_CREAT = 0x200 + IPC_EXCL = 0x400 + IPC_NOWAIT = 0x800 + IPC_PRIVATE = 0x0 +) + +const ( + IPC_RMID = 0x0 + IPC_SET = 0x1 + IPC_STAT = 0x2 +) + +const ( + SHM_RDONLY = 0x1000 + SHM_RND = 0x2000 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 96f0e6ae2a..b23c02337d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -209,6 +209,92 @@ type RawSockaddrCtl struct { Sc_reserved [5]uint32 } +type RawSockaddrVM 
struct { + Len uint8 + Family uint8 + Reserved1 uint16 + Port uint32 + Cid uint32 +} + +type XVSockPCB struct { + Xv_len uint32 + Xv_vsockpp uint64 + Xvp_local_cid uint32 + Xvp_local_port uint32 + Xvp_remote_cid uint32 + Xvp_remote_port uint32 + Xvp_rxcnt uint32 + Xvp_txcnt uint32 + Xvp_peer_rxhiwat uint32 + Xvp_peer_rxcnt uint32 + Xvp_last_pid int32 + Xvp_gencnt uint64 + Xv_socket XSocket + _ [4]byte +} + +type XSocket struct { + Xso_len uint32 + Xso_so uint32 + So_type int16 + So_options int16 + So_linger int16 + So_state int16 + So_pcb uint32 + Xso_protocol int32 + Xso_family int32 + So_qlen int16 + So_incqlen int16 + So_qlimit int16 + So_timeo int16 + So_error uint16 + So_pgid int32 + So_oobmark uint32 + So_rcv XSockbuf + So_snd XSockbuf + So_uid uint32 +} + +type XSocket64 struct { + Xso_len uint32 + _ [8]byte + So_type int16 + So_options int16 + So_linger int16 + So_state int16 + _ [8]byte + Xso_protocol int32 + Xso_family int32 + So_qlen int16 + So_incqlen int16 + So_qlimit int16 + So_timeo int16 + So_error uint16 + So_pgid int32 + So_oobmark uint32 + So_rcv XSockbuf + So_snd XSockbuf + So_uid uint32 +} + +type XSockbuf struct { + Cc uint32 + Hiwat uint32 + Mbcnt uint32 + Mbmax uint32 + Lowat int32 + Flags int16 + Timeo int16 +} + +type XVSockPgen struct { + Len uint32 + Count uint64 + Gen uint64 + Sogen uint64 +} + type _Socklen uint32 type Xucred struct { @@ -287,6 +373,11 @@ const ( SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x14 SizeofSockaddrCtl = 0x20 + SizeofSockaddrVM = 0xc + SizeofXvsockpcb = 0xa8 + SizeofXSocket = 0x64 + SizeofXSockbuf = 0x18 + SizeofXVSockPgen = 0x20 SizeofXucred = 0x4c SizeofLinger = 0x8 SizeofIovec = 0x10 @@ -550,13 +641,13 @@ type Eproc struct { Tdev int32 Tpgid int32 Tsess uintptr - Wmesg [8]int8 + Wmesg [8]byte Xsize int32 Xrssize int16 Xccount int16 Xswrss int16 Flag int32 - Login [12]int8 + Login [12]byte Spare [4]int32 _ [4]byte } @@ -597,7 +688,7 @@ type ExternProc struct { P_priority uint8 P_usrpri uint8 P_nice int8 - P_comm [17]int8 + P_comm [17]byte P_pgrp uintptr P_addr uintptr P_xstat uint16 @@ -639,3 +730,39 @@ type Ucred struct { Ngroups int16 Groups [16]uint32 } + +type SysvIpcPerm struct { + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint16 + _ uint16 + _ int32 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint64 + Lpid int32 + Cpid int32 + Nattch uint16 + _ [34]byte +} + +const ( + IPC_CREAT = 0x200 + IPC_EXCL = 0x400 + IPC_NOWAIT = 0x800 + IPC_PRIVATE = 0x0 +) + +const ( + IPC_RMID = 0x0 + IPC_SET = 0x1 + IPC_STAT = 0x2 +) + +const ( + SHM_RDONLY = 0x1000 + SHM_RND = 0x2000 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 06dcd787b2..37b521436b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1,4 +1,4 @@ -// Code generated by mkmerge.go; DO NOT EDIT. +// Code generated by mkmerge; DO NOT EDIT. 
//go:build linux // +build linux @@ -743,6 +743,8 @@ const ( AT_STATX_FORCE_SYNC = 0x2000 AT_STATX_DONT_SYNC = 0x4000 + AT_RECURSIVE = 0x8000 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 @@ -3264,7 +3266,8 @@ const ( LWTUNNEL_ENCAP_BPF = 0x6 LWTUNNEL_ENCAP_SEG6_LOCAL = 0x7 LWTUNNEL_ENCAP_RPL = 0x8 - LWTUNNEL_ENCAP_MAX = 0x8 + LWTUNNEL_ENCAP_IOAM6 = 0x9 + LWTUNNEL_ENCAP_MAX = 0x9 MPLS_IPTUNNEL_UNSPEC = 0x0 MPLS_IPTUNNEL_DST = 0x1 @@ -3617,7 +3620,9 @@ const ( ETHTOOL_A_COALESCE_TX_USECS_HIGH = 0x15 ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH = 0x16 ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 0x17 - ETHTOOL_A_COALESCE_MAX = 0x17 + ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 0x18 + ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 0x19 + ETHTOOL_A_COALESCE_MAX = 0x19 ETHTOOL_A_PAUSE_UNSPEC = 0x0 ETHTOOL_A_PAUSE_HEADER = 0x1 ETHTOOL_A_PAUSE_AUTONEG = 0x2 @@ -3936,3 +3941,30 @@ type LandlockPathBeneathAttr struct { const ( LANDLOCK_RULE_PATH_BENEATH = 0x1 ) + +const ( + IPC_CREAT = 0x200 + IPC_EXCL = 0x400 + IPC_NOWAIT = 0x800 + IPC_PRIVATE = 0x0 + + ipc_64 = 0x100 +) + +const ( + IPC_RMID = 0x0 + IPC_SET = 0x1 + IPC_STAT = 0x2 +) + +const ( + SHM_RDONLY = 0x1000 + SHM_RND = 0x2000 +) + +type MountAttr struct { + Attr_set uint64 + Attr_clr uint64 + Propagation uint64 + Userns_fd uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 3219adedaf..bea2549455 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux @@ -639,3 +639,32 @@ const ( const ( PIDFD_NONBLOCK = 0x800 ) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint16 + _ [2]uint8 + Seq uint16 + _ uint16 + _ uint32 + _ uint32 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint32 + Atime uint32 + Atime_high uint32 + Dtime uint32 + Dtime_high uint32 + Ctime uint32 + Ctime_high uint32 + Cpid int32 + Lpid int32 + Nattch uint32 + _ uint32 + _ uint32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 16acd3bcbf..b8c8f28943 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
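The ztypes_linux.go hunk above introduces the MountAttr structure and the AT_RECURSIVE flag that back the new mountSetattr wrapper. A minimal sketch, assuming the exported unix.MountSetattr helper and the MOUNT_ATTR_* constants are available in this x/sys/unix revision; the /mnt/data path is only illustrative, and the call needs Linux 5.12+ with CAP_SYS_ADMIN at runtime:

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Recursively flip an existing mount (illustrative path) to
	// read-only and nosuid in place.
	attr := &unix.MountAttr{
		Attr_set: unix.MOUNT_ATTR_RDONLY | unix.MOUNT_ATTR_NOSUID,
	}
	if err := unix.MountSetattr(unix.AT_FDCWD, "/mnt/data", unix.AT_RECURSIVE, attr); err != nil {
		log.Fatalf("mount_setattr: %v", err)
	}
}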
//go:build amd64 && linux @@ -657,3 +657,29 @@ const ( const ( PIDFD_NONBLOCK = 0x800 ) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + _ [0]uint8 + Seq uint16 + _ uint16 + _ uint64 + _ uint64 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint64 + Atime int64 + Dtime int64 + Ctime int64 + Cpid int32 + Lpid int32 + Nattch uint64 + _ uint64 + _ uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index c4982a2298..4db4430163 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux @@ -634,3 +634,32 @@ const ( const ( PIDFD_NONBLOCK = 0x800 ) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint16 + _ [2]uint8 + Seq uint16 + _ uint16 + _ uint32 + _ uint32 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint32 + Atime uint32 + Atime_high uint32 + Dtime uint32 + Dtime_high uint32 + Ctime uint32 + Ctime_high uint32 + Cpid int32 + Lpid int32 + Nattch uint32 + _ uint32 + _ uint32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 98bb8a41a7..3ebcad8a88 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux @@ -636,3 +636,29 @@ const ( const ( PIDFD_NONBLOCK = 0x800 ) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + _ [0]uint8 + Seq uint16 + _ uint16 + _ uint64 + _ uint64 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint64 + Atime int64 + Dtime int64 + Ctime int64 + Cpid int32 + Lpid int32 + Nattch uint64 + _ uint64 + _ uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index d5bfc35656..3eb33e48ab 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mips && linux @@ -640,3 +640,31 @@ const ( const ( PIDFD_NONBLOCK = 0x80 ) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + _ [0]uint8 + Seq uint16 + _ uint16 + _ uint32 + _ uint32 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint32 + Atime uint32 + Dtime uint32 + Ctime uint32 + Cpid int32 + Lpid int32 + Nattch uint32 + Atime_high uint16 + Dtime_high uint16 + Ctime_high uint16 + _ uint16 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index b52c568dc9..79a9446725 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux @@ -639,3 +639,29 @@ const ( const ( PIDFD_NONBLOCK = 0x80 ) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + _ [0]uint8 + Seq uint16 + _ uint16 + _ uint64 + _ uint64 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint64 + Atime int64 + Dtime int64 + Ctime int64 + Cpid int32 + Lpid int32 + Nattch uint64 + _ uint64 + _ uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index a340b84b9c..8f4b107cad 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux @@ -639,3 +639,29 @@ const ( const ( PIDFD_NONBLOCK = 0x80 ) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + _ [0]uint8 + Seq uint16 + _ uint16 + _ uint64 + _ uint64 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint64 + Atime int64 + Dtime int64 + Ctime int64 + Cpid int32 + Lpid int32 + Nattch uint64 + _ uint64 + _ uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index b43d8e2ce4..e4eb217981 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mipsle && linux @@ -640,3 +640,31 @@ const ( const ( PIDFD_NONBLOCK = 0x80 ) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + _ [0]uint8 + Seq uint16 + _ uint16 + _ uint32 + _ uint32 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint32 + Atime uint32 + Dtime uint32 + Ctime uint32 + Cpid int32 + Lpid int32 + Nattch uint32 + Atime_high uint16 + Dtime_high uint16 + Ctime_high uint16 + _ uint16 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index efd7313a76..d5b21f0f7d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux @@ -646,3 +646,33 @@ const ( const ( PIDFD_NONBLOCK = 0x800 ) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + Seq uint32 + _ uint32 + _ uint64 + _ uint64 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Atime_high uint32 + Atime uint32 + Dtime_high uint32 + Dtime uint32 + Ctime_high uint32 + Ctime uint32 + _ uint32 + Segsz uint32 + Cpid int32 + Lpid int32 + Nattch uint32 + _ uint32 + _ uint32 + _ [4]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 22cedda571..5188d142b9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux @@ -646,3 +646,28 @@ const ( const ( PIDFD_NONBLOCK = 0x800 ) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + Seq uint32 + _ uint32 + _ uint64 + _ uint64 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Atime int64 + Dtime int64 + Ctime int64 + Segsz uint64 + Cpid int32 + Lpid int32 + Nattch uint64 + _ uint64 + _ uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 452a76df1c..de4dd4c736 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build ppc64le && linux @@ -646,3 +646,28 @@ const ( const ( PIDFD_NONBLOCK = 0x800 ) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + Seq uint32 + _ uint32 + _ uint64 + _ uint64 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Atime int64 + Dtime int64 + Ctime int64 + Segsz uint64 + Cpid int32 + Lpid int32 + Nattch uint64 + _ uint64 + _ uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 96c667df4d..dccbf9b060 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux @@ -664,3 +664,29 @@ const ( const ( PIDFD_NONBLOCK = 0x800 ) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + _ [0]uint8 + Seq uint16 + _ uint16 + _ uint64 + _ uint64 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint64 + Atime int64 + Dtime int64 + Ctime int64 + Cpid int32 + Lpid int32 + Nattch uint64 + _ uint64 + _ uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index af04ee1748..6358806106 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux @@ -660,3 +660,28 @@ const ( const ( PIDFD_NONBLOCK = 0x800 ) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + _ uint16 + Seq uint16 + _ uint64 + _ uint64 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint64 + Atime int64 + Dtime int64 + Ctime int64 + Cpid int32 + Lpid int32 + Nattch uint64 + _ uint64 + _ uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 6f385cf6ab..765edc13ff 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build sparc64 && linux @@ -641,3 +641,28 @@ const ( const ( PIDFD_NONBLOCK = 0x4000 ) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + _ uint16 + Seq uint16 + _ uint64 + _ uint64 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Atime int64 + Dtime int64 + Ctime int64 + Segsz uint64 + Cpid int32 + Lpid int32 + Nattch uint64 + _ uint64 + _ uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 2a8b1e6f73..baf5fe6504 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -564,12 +564,11 @@ type Uvmexp struct { Kmapent int32 } -const SizeofClockinfo = 0x14 +const SizeofClockinfo = 0x10 type Clockinfo struct { - Hz int32 - Tick int32 - Tickadj int32 - Stathz int32 - Profhz int32 + Hz int32 + Tick int32 + Stathz int32 + Profhz int32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index b1759cf705..e21ae8ecfa 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -564,12 +564,11 @@ type Uvmexp struct { Kmapent int32 } -const SizeofClockinfo = 0x14 +const SizeofClockinfo = 0x10 type Clockinfo struct { - Hz int32 - Tick int32 - Tickadj int32 - Stathz int32 - Profhz int32 + Hz int32 + Tick int32 + Stathz int32 + Profhz int32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index e807de2065..f190651cd9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -565,12 +565,11 @@ type Uvmexp struct { Kmapent int32 } -const SizeofClockinfo = 0x14 +const SizeofClockinfo = 0x10 type Clockinfo struct { - Hz int32 - Tick int32 - Tickadj int32 - Stathz int32 - Profhz int32 + Hz int32 + Tick int32 + Stathz int32 + Profhz int32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go index ff3aecaee4..84747c582c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -558,12 +558,11 @@ type Uvmexp struct { Kmapent int32 } -const SizeofClockinfo = 0x14 +const SizeofClockinfo = 0x10 type Clockinfo struct { - Hz int32 - Tick int32 - Tickadj int32 - Stathz int32 - Profhz int32 + Hz int32 + Tick int32 + Stathz int32 + Profhz int32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go index 9ecda69174..ac5c8b6370 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go @@ -558,12 +558,11 @@ type Uvmexp struct { Kmapent int32 } -const SizeofClockinfo = 0x14 +const SizeofClockinfo = 0x10 type Clockinfo struct { - Hz int32 - Tick int32 - Tickadj int32 - Stathz int32 - Profhz int32 + Hz int32 + Tick int32 + Stathz int32 + Profhz int32 } diff --git a/vendor/golang.org/x/sys/windows/aliases.go b/vendor/golang.org/x/sys/windows/aliases.go index af3af60db9..a20ebea633 100644 --- a/vendor/golang.org/x/sys/windows/aliases.go +++ b/vendor/golang.org/x/sys/windows/aliases.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build windows -// +build go1.9 +//go:build windows && go1.9 +// +build windows,go1.9 package windows diff --git a/vendor/golang.org/x/sys/windows/eventlog.go b/vendor/golang.org/x/sys/windows/eventlog.go index 40af946e16..2cd60645ee 100644 --- a/vendor/golang.org/x/sys/windows/eventlog.go +++ b/vendor/golang.org/x/sys/windows/eventlog.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build windows // +build windows package windows diff --git a/vendor/golang.org/x/sys/windows/memory_windows.go b/vendor/golang.org/x/sys/windows/memory_windows.go index 1adb60739a..6dc0920a84 100644 --- a/vendor/golang.org/x/sys/windows/memory_windows.go +++ b/vendor/golang.org/x/sys/windows/memory_windows.go @@ -35,3 +35,14 @@ const ( QUOTA_LIMITS_HARDWS_MAX_DISABLE = 0x00000008 QUOTA_LIMITS_HARDWS_MAX_ENABLE = 0x00000004 ) + +type MemoryBasicInformation struct { + BaseAddress uintptr + AllocationBase uintptr + AllocationProtect uint32 + PartitionId uint16 + RegionSize uintptr + State uint32 + Protect uint32 + Type uint32 +} diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go index 328e3b2ace..8563f79c57 100644 --- a/vendor/golang.org/x/sys/windows/mksyscall.go +++ b/vendor/golang.org/x/sys/windows/mksyscall.go @@ -2,8 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build generate // +build generate package windows -//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go eventlog.go service.go syscall_windows.go security_windows.go +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go eventlog.go service.go syscall_windows.go security_windows.go setupapi_windows.go diff --git a/vendor/golang.org/x/sys/windows/race.go b/vendor/golang.org/x/sys/windows/race.go index a74e3e24b5..9196b089ca 100644 --- a/vendor/golang.org/x/sys/windows/race.go +++ b/vendor/golang.org/x/sys/windows/race.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build windows && race // +build windows,race package windows diff --git a/vendor/golang.org/x/sys/windows/race0.go b/vendor/golang.org/x/sys/windows/race0.go index e44a3cbf67..7bae4817a0 100644 --- a/vendor/golang.org/x/sys/windows/race0.go +++ b/vendor/golang.org/x/sys/windows/race0.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build windows && !race // +build windows,!race package windows diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go index b269850d06..f8deca8397 100644 --- a/vendor/golang.org/x/sys/windows/service.go +++ b/vendor/golang.org/x/sys/windows/service.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
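The memory_windows.go hunk above adds the MemoryBasicInformation structure, which is the buffer type filled in by VirtualQuery. A minimal sketch, assuming windows.VirtualQuery with the signature used below is present in the vendored x/sys/windows revision; it only builds with GOOS=windows:

package main

import (
	"fmt"
	"log"
	"unsafe"

	"golang.org/x/sys/windows"
)

func main() {
	var local [64]byte // any address owned by this process works

	// Query the memory region containing the local buffer.
	var mbi windows.MemoryBasicInformation
	err := windows.VirtualQuery(uintptr(unsafe.Pointer(&local[0])), &mbi, unsafe.Sizeof(mbi))
	if err != nil {
		log.Fatalf("VirtualQuery: %v", err)
	}
	fmt.Printf("base %#x size %#x state %#x protect %#x\n",
		mbi.BaseAddress, mbi.RegionSize, mbi.State, mbi.Protect)
}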
+//go:build windows // +build windows package windows @@ -16,8 +17,6 @@ const ( SC_MANAGER_ALL_ACCESS = 0xf003f ) -//sys OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenSCManagerW - const ( SERVICE_KERNEL_DRIVER = 1 SERVICE_FILE_SYSTEM_DRIVER = 2 @@ -132,6 +131,14 @@ const ( SC_EVENT_DATABASE_CHANGE = 0 SC_EVENT_PROPERTY_CHANGE = 1 SC_EVENT_STATUS_CHANGE = 2 + + SERVICE_START_REASON_DEMAND = 0x00000001 + SERVICE_START_REASON_AUTO = 0x00000002 + SERVICE_START_REASON_TRIGGER = 0x00000004 + SERVICE_START_REASON_RESTART_ON_FAILURE = 0x00000008 + SERVICE_START_REASON_DELAYEDAUTO = 0x00000010 + + SERVICE_DYNAMIC_INFORMATION_LEVEL_START_REASON = 1 ) type SERVICE_STATUS struct { @@ -216,6 +223,7 @@ type QUERY_SERVICE_LOCK_STATUS struct { LockDuration uint32 } +//sys OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenSCManagerW //sys CloseServiceHandle(handle Handle) (err error) = advapi32.CloseServiceHandle //sys CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) [failretval==0] = advapi32.CreateServiceW //sys OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenServiceW @@ -235,3 +243,5 @@ type QUERY_SERVICE_LOCK_STATUS struct { //sys NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) = advapi32.NotifyServiceStatusChangeW //sys SubscribeServiceChangeNotifications(service Handle, eventType uint32, callback uintptr, callbackCtx uintptr, subscription *uintptr) (ret error) = sechost.SubscribeServiceChangeNotifications? //sys UnsubscribeServiceChangeNotifications(subscription uintptr) = sechost.UnsubscribeServiceChangeNotifications? +//sys RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, context uintptr) (handle Handle, err error) = advapi32.RegisterServiceCtrlHandlerExW +//sys QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInfo unsafe.Pointer) (err error) = advapi32.QueryServiceDynamicInformation? diff --git a/vendor/golang.org/x/sys/windows/setupapi_windows.go b/vendor/golang.org/x/sys/windows/setupapi_windows.go new file mode 100644 index 0000000000..14027da3f3 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/setupapi_windows.go @@ -0,0 +1,1425 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import ( + "encoding/binary" + "errors" + "fmt" + "runtime" + "strings" + "syscall" + "unsafe" +) + +// This file contains functions that wrap SetupAPI.dll and CfgMgr32.dll, +// core system functions for managing hardware devices, drivers, and the PnP tree. 
+// Information about these APIs can be found at: +// https://docs.microsoft.com/en-us/windows-hardware/drivers/install/setupapi +// https://docs.microsoft.com/en-us/windows/win32/devinst/cfgmgr32- + +const ( + ERROR_EXPECTED_SECTION_NAME Errno = 0x20000000 | 0xC0000000 | 0 + ERROR_BAD_SECTION_NAME_LINE Errno = 0x20000000 | 0xC0000000 | 1 + ERROR_SECTION_NAME_TOO_LONG Errno = 0x20000000 | 0xC0000000 | 2 + ERROR_GENERAL_SYNTAX Errno = 0x20000000 | 0xC0000000 | 3 + ERROR_WRONG_INF_STYLE Errno = 0x20000000 | 0xC0000000 | 0x100 + ERROR_SECTION_NOT_FOUND Errno = 0x20000000 | 0xC0000000 | 0x101 + ERROR_LINE_NOT_FOUND Errno = 0x20000000 | 0xC0000000 | 0x102 + ERROR_NO_BACKUP Errno = 0x20000000 | 0xC0000000 | 0x103 + ERROR_NO_ASSOCIATED_CLASS Errno = 0x20000000 | 0xC0000000 | 0x200 + ERROR_CLASS_MISMATCH Errno = 0x20000000 | 0xC0000000 | 0x201 + ERROR_DUPLICATE_FOUND Errno = 0x20000000 | 0xC0000000 | 0x202 + ERROR_NO_DRIVER_SELECTED Errno = 0x20000000 | 0xC0000000 | 0x203 + ERROR_KEY_DOES_NOT_EXIST Errno = 0x20000000 | 0xC0000000 | 0x204 + ERROR_INVALID_DEVINST_NAME Errno = 0x20000000 | 0xC0000000 | 0x205 + ERROR_INVALID_CLASS Errno = 0x20000000 | 0xC0000000 | 0x206 + ERROR_DEVINST_ALREADY_EXISTS Errno = 0x20000000 | 0xC0000000 | 0x207 + ERROR_DEVINFO_NOT_REGISTERED Errno = 0x20000000 | 0xC0000000 | 0x208 + ERROR_INVALID_REG_PROPERTY Errno = 0x20000000 | 0xC0000000 | 0x209 + ERROR_NO_INF Errno = 0x20000000 | 0xC0000000 | 0x20A + ERROR_NO_SUCH_DEVINST Errno = 0x20000000 | 0xC0000000 | 0x20B + ERROR_CANT_LOAD_CLASS_ICON Errno = 0x20000000 | 0xC0000000 | 0x20C + ERROR_INVALID_CLASS_INSTALLER Errno = 0x20000000 | 0xC0000000 | 0x20D + ERROR_DI_DO_DEFAULT Errno = 0x20000000 | 0xC0000000 | 0x20E + ERROR_DI_NOFILECOPY Errno = 0x20000000 | 0xC0000000 | 0x20F + ERROR_INVALID_HWPROFILE Errno = 0x20000000 | 0xC0000000 | 0x210 + ERROR_NO_DEVICE_SELECTED Errno = 0x20000000 | 0xC0000000 | 0x211 + ERROR_DEVINFO_LIST_LOCKED Errno = 0x20000000 | 0xC0000000 | 0x212 + ERROR_DEVINFO_DATA_LOCKED Errno = 0x20000000 | 0xC0000000 | 0x213 + ERROR_DI_BAD_PATH Errno = 0x20000000 | 0xC0000000 | 0x214 + ERROR_NO_CLASSINSTALL_PARAMS Errno = 0x20000000 | 0xC0000000 | 0x215 + ERROR_FILEQUEUE_LOCKED Errno = 0x20000000 | 0xC0000000 | 0x216 + ERROR_BAD_SERVICE_INSTALLSECT Errno = 0x20000000 | 0xC0000000 | 0x217 + ERROR_NO_CLASS_DRIVER_LIST Errno = 0x20000000 | 0xC0000000 | 0x218 + ERROR_NO_ASSOCIATED_SERVICE Errno = 0x20000000 | 0xC0000000 | 0x219 + ERROR_NO_DEFAULT_DEVICE_INTERFACE Errno = 0x20000000 | 0xC0000000 | 0x21A + ERROR_DEVICE_INTERFACE_ACTIVE Errno = 0x20000000 | 0xC0000000 | 0x21B + ERROR_DEVICE_INTERFACE_REMOVED Errno = 0x20000000 | 0xC0000000 | 0x21C + ERROR_BAD_INTERFACE_INSTALLSECT Errno = 0x20000000 | 0xC0000000 | 0x21D + ERROR_NO_SUCH_INTERFACE_CLASS Errno = 0x20000000 | 0xC0000000 | 0x21E + ERROR_INVALID_REFERENCE_STRING Errno = 0x20000000 | 0xC0000000 | 0x21F + ERROR_INVALID_MACHINENAME Errno = 0x20000000 | 0xC0000000 | 0x220 + ERROR_REMOTE_COMM_FAILURE Errno = 0x20000000 | 0xC0000000 | 0x221 + ERROR_MACHINE_UNAVAILABLE Errno = 0x20000000 | 0xC0000000 | 0x222 + ERROR_NO_CONFIGMGR_SERVICES Errno = 0x20000000 | 0xC0000000 | 0x223 + ERROR_INVALID_PROPPAGE_PROVIDER Errno = 0x20000000 | 0xC0000000 | 0x224 + ERROR_NO_SUCH_DEVICE_INTERFACE Errno = 0x20000000 | 0xC0000000 | 0x225 + ERROR_DI_POSTPROCESSING_REQUIRED Errno = 0x20000000 | 0xC0000000 | 0x226 + ERROR_INVALID_COINSTALLER Errno = 0x20000000 | 0xC0000000 | 0x227 + ERROR_NO_COMPAT_DRIVERS Errno = 0x20000000 | 0xC0000000 | 0x228 + ERROR_NO_DEVICE_ICON Errno = 
0x20000000 | 0xC0000000 | 0x229 + ERROR_INVALID_INF_LOGCONFIG Errno = 0x20000000 | 0xC0000000 | 0x22A + ERROR_DI_DONT_INSTALL Errno = 0x20000000 | 0xC0000000 | 0x22B + ERROR_INVALID_FILTER_DRIVER Errno = 0x20000000 | 0xC0000000 | 0x22C + ERROR_NON_WINDOWS_NT_DRIVER Errno = 0x20000000 | 0xC0000000 | 0x22D + ERROR_NON_WINDOWS_DRIVER Errno = 0x20000000 | 0xC0000000 | 0x22E + ERROR_NO_CATALOG_FOR_OEM_INF Errno = 0x20000000 | 0xC0000000 | 0x22F + ERROR_DEVINSTALL_QUEUE_NONNATIVE Errno = 0x20000000 | 0xC0000000 | 0x230 + ERROR_NOT_DISABLEABLE Errno = 0x20000000 | 0xC0000000 | 0x231 + ERROR_CANT_REMOVE_DEVINST Errno = 0x20000000 | 0xC0000000 | 0x232 + ERROR_INVALID_TARGET Errno = 0x20000000 | 0xC0000000 | 0x233 + ERROR_DRIVER_NONNATIVE Errno = 0x20000000 | 0xC0000000 | 0x234 + ERROR_IN_WOW64 Errno = 0x20000000 | 0xC0000000 | 0x235 + ERROR_SET_SYSTEM_RESTORE_POINT Errno = 0x20000000 | 0xC0000000 | 0x236 + ERROR_SCE_DISABLED Errno = 0x20000000 | 0xC0000000 | 0x238 + ERROR_UNKNOWN_EXCEPTION Errno = 0x20000000 | 0xC0000000 | 0x239 + ERROR_PNP_REGISTRY_ERROR Errno = 0x20000000 | 0xC0000000 | 0x23A + ERROR_REMOTE_REQUEST_UNSUPPORTED Errno = 0x20000000 | 0xC0000000 | 0x23B + ERROR_NOT_AN_INSTALLED_OEM_INF Errno = 0x20000000 | 0xC0000000 | 0x23C + ERROR_INF_IN_USE_BY_DEVICES Errno = 0x20000000 | 0xC0000000 | 0x23D + ERROR_DI_FUNCTION_OBSOLETE Errno = 0x20000000 | 0xC0000000 | 0x23E + ERROR_NO_AUTHENTICODE_CATALOG Errno = 0x20000000 | 0xC0000000 | 0x23F + ERROR_AUTHENTICODE_DISALLOWED Errno = 0x20000000 | 0xC0000000 | 0x240 + ERROR_AUTHENTICODE_TRUSTED_PUBLISHER Errno = 0x20000000 | 0xC0000000 | 0x241 + ERROR_AUTHENTICODE_TRUST_NOT_ESTABLISHED Errno = 0x20000000 | 0xC0000000 | 0x242 + ERROR_AUTHENTICODE_PUBLISHER_NOT_TRUSTED Errno = 0x20000000 | 0xC0000000 | 0x243 + ERROR_SIGNATURE_OSATTRIBUTE_MISMATCH Errno = 0x20000000 | 0xC0000000 | 0x244 + ERROR_ONLY_VALIDATE_VIA_AUTHENTICODE Errno = 0x20000000 | 0xC0000000 | 0x245 + ERROR_DEVICE_INSTALLER_NOT_READY Errno = 0x20000000 | 0xC0000000 | 0x246 + ERROR_DRIVER_STORE_ADD_FAILED Errno = 0x20000000 | 0xC0000000 | 0x247 + ERROR_DEVICE_INSTALL_BLOCKED Errno = 0x20000000 | 0xC0000000 | 0x248 + ERROR_DRIVER_INSTALL_BLOCKED Errno = 0x20000000 | 0xC0000000 | 0x249 + ERROR_WRONG_INF_TYPE Errno = 0x20000000 | 0xC0000000 | 0x24A + ERROR_FILE_HASH_NOT_IN_CATALOG Errno = 0x20000000 | 0xC0000000 | 0x24B + ERROR_DRIVER_STORE_DELETE_FAILED Errno = 0x20000000 | 0xC0000000 | 0x24C + ERROR_UNRECOVERABLE_STACK_OVERFLOW Errno = 0x20000000 | 0xC0000000 | 0x300 + EXCEPTION_SPAPI_UNRECOVERABLE_STACK_OVERFLOW Errno = ERROR_UNRECOVERABLE_STACK_OVERFLOW + ERROR_NO_DEFAULT_INTERFACE_DEVICE Errno = ERROR_NO_DEFAULT_DEVICE_INTERFACE + ERROR_INTERFACE_DEVICE_ACTIVE Errno = ERROR_DEVICE_INTERFACE_ACTIVE + ERROR_INTERFACE_DEVICE_REMOVED Errno = ERROR_DEVICE_INTERFACE_REMOVED + ERROR_NO_SUCH_INTERFACE_DEVICE Errno = ERROR_NO_SUCH_DEVICE_INTERFACE +) + +const ( + MAX_DEVICE_ID_LEN = 200 + MAX_DEVNODE_ID_LEN = MAX_DEVICE_ID_LEN + MAX_GUID_STRING_LEN = 39 // 38 chars + terminator null + MAX_CLASS_NAME_LEN = 32 + MAX_PROFILE_LEN = 80 + MAX_CONFIG_VALUE = 9999 + MAX_INSTANCE_VALUE = 9999 + CONFIGMG_VERSION = 0x0400 +) + +// Maximum string length constants +const ( + LINE_LEN = 256 // Windows 9x-compatible maximum for displayable strings coming from a device INF. + MAX_INF_STRING_LENGTH = 4096 // Actual maximum size of an INF string (including string substitutions). + MAX_INF_SECTION_NAME_LENGTH = 255 // For Windows 9x compatibility, INF section names should be constrained to 32 characters. 
+ MAX_TITLE_LEN = 60 + MAX_INSTRUCTION_LEN = 256 + MAX_LABEL_LEN = 30 + MAX_SERVICE_NAME_LEN = 256 + MAX_SUBTITLE_LEN = 256 +) + +const ( + // SP_MAX_MACHINENAME_LENGTH defines maximum length of a machine name in the format expected by ConfigMgr32 CM_Connect_Machine (i.e., "\\\\MachineName\0"). + SP_MAX_MACHINENAME_LENGTH = MAX_PATH + 3 +) + +// HSPFILEQ is type for setup file queue +type HSPFILEQ uintptr + +// DevInfo holds reference to device information set +type DevInfo Handle + +// DEVINST is a handle usually recognized by cfgmgr32 APIs +type DEVINST uint32 + +// DevInfoData is a device information structure (references a device instance that is a member of a device information set) +type DevInfoData struct { + size uint32 + ClassGUID GUID + DevInst DEVINST + _ uintptr +} + +// DevInfoListDetailData is a structure for detailed information on a device information set (used for SetupDiGetDeviceInfoListDetail which supersedes the functionality of SetupDiGetDeviceInfoListClass). +type DevInfoListDetailData struct { + size uint32 // Use unsafeSizeOf method + ClassGUID GUID + RemoteMachineHandle Handle + remoteMachineName [SP_MAX_MACHINENAME_LENGTH]uint16 +} + +func (*DevInfoListDetailData) unsafeSizeOf() uint32 { + if unsafe.Sizeof(uintptr(0)) == 4 { + // Windows declares this with pshpack1.h + return uint32(unsafe.Offsetof(DevInfoListDetailData{}.remoteMachineName) + unsafe.Sizeof(DevInfoListDetailData{}.remoteMachineName)) + } + return uint32(unsafe.Sizeof(DevInfoListDetailData{})) +} + +func (data *DevInfoListDetailData) RemoteMachineName() string { + return UTF16ToString(data.remoteMachineName[:]) +} + +func (data *DevInfoListDetailData) SetRemoteMachineName(remoteMachineName string) error { + str, err := UTF16FromString(remoteMachineName) + if err != nil { + return err + } + copy(data.remoteMachineName[:], str) + return nil +} + +// DI_FUNCTION is function type for device installer +type DI_FUNCTION uint32 + +const ( + DIF_SELECTDEVICE DI_FUNCTION = 0x00000001 + DIF_INSTALLDEVICE DI_FUNCTION = 0x00000002 + DIF_ASSIGNRESOURCES DI_FUNCTION = 0x00000003 + DIF_PROPERTIES DI_FUNCTION = 0x00000004 + DIF_REMOVE DI_FUNCTION = 0x00000005 + DIF_FIRSTTIMESETUP DI_FUNCTION = 0x00000006 + DIF_FOUNDDEVICE DI_FUNCTION = 0x00000007 + DIF_SELECTCLASSDRIVERS DI_FUNCTION = 0x00000008 + DIF_VALIDATECLASSDRIVERS DI_FUNCTION = 0x00000009 + DIF_INSTALLCLASSDRIVERS DI_FUNCTION = 0x0000000A + DIF_CALCDISKSPACE DI_FUNCTION = 0x0000000B + DIF_DESTROYPRIVATEDATA DI_FUNCTION = 0x0000000C + DIF_VALIDATEDRIVER DI_FUNCTION = 0x0000000D + DIF_DETECT DI_FUNCTION = 0x0000000F + DIF_INSTALLWIZARD DI_FUNCTION = 0x00000010 + DIF_DESTROYWIZARDDATA DI_FUNCTION = 0x00000011 + DIF_PROPERTYCHANGE DI_FUNCTION = 0x00000012 + DIF_ENABLECLASS DI_FUNCTION = 0x00000013 + DIF_DETECTVERIFY DI_FUNCTION = 0x00000014 + DIF_INSTALLDEVICEFILES DI_FUNCTION = 0x00000015 + DIF_UNREMOVE DI_FUNCTION = 0x00000016 + DIF_SELECTBESTCOMPATDRV DI_FUNCTION = 0x00000017 + DIF_ALLOW_INSTALL DI_FUNCTION = 0x00000018 + DIF_REGISTERDEVICE DI_FUNCTION = 0x00000019 + DIF_NEWDEVICEWIZARD_PRESELECT DI_FUNCTION = 0x0000001A + DIF_NEWDEVICEWIZARD_SELECT DI_FUNCTION = 0x0000001B + DIF_NEWDEVICEWIZARD_PREANALYZE DI_FUNCTION = 0x0000001C + DIF_NEWDEVICEWIZARD_POSTANALYZE DI_FUNCTION = 0x0000001D + DIF_NEWDEVICEWIZARD_FINISHINSTALL DI_FUNCTION = 0x0000001E + DIF_INSTALLINTERFACES DI_FUNCTION = 0x00000020 + DIF_DETECTCANCEL DI_FUNCTION = 0x00000021 + DIF_REGISTER_COINSTALLERS DI_FUNCTION = 0x00000022 + DIF_ADDPROPERTYPAGE_ADVANCED DI_FUNCTION = 0x00000023 + 
DIF_ADDPROPERTYPAGE_BASIC DI_FUNCTION = 0x00000024 + DIF_TROUBLESHOOTER DI_FUNCTION = 0x00000026 + DIF_POWERMESSAGEWAKE DI_FUNCTION = 0x00000027 + DIF_ADDREMOTEPROPERTYPAGE_ADVANCED DI_FUNCTION = 0x00000028 + DIF_UPDATEDRIVER_UI DI_FUNCTION = 0x00000029 + DIF_FINISHINSTALL_ACTION DI_FUNCTION = 0x0000002A +) + +// DevInstallParams is device installation parameters structure (associated with a particular device information element, or globally with a device information set) +type DevInstallParams struct { + size uint32 + Flags DI_FLAGS + FlagsEx DI_FLAGSEX + hwndParent uintptr + InstallMsgHandler uintptr + InstallMsgHandlerContext uintptr + FileQueue HSPFILEQ + _ uintptr + _ uint32 + driverPath [MAX_PATH]uint16 +} + +func (params *DevInstallParams) DriverPath() string { + return UTF16ToString(params.driverPath[:]) +} + +func (params *DevInstallParams) SetDriverPath(driverPath string) error { + str, err := UTF16FromString(driverPath) + if err != nil { + return err + } + copy(params.driverPath[:], str) + return nil +} + +// DI_FLAGS is SP_DEVINSTALL_PARAMS.Flags values +type DI_FLAGS uint32 + +const ( + // Flags for choosing a device + DI_SHOWOEM DI_FLAGS = 0x00000001 // support Other... button + DI_SHOWCOMPAT DI_FLAGS = 0x00000002 // show compatibility list + DI_SHOWCLASS DI_FLAGS = 0x00000004 // show class list + DI_SHOWALL DI_FLAGS = 0x00000007 // both class & compat list shown + DI_NOVCP DI_FLAGS = 0x00000008 // don't create a new copy queue--use caller-supplied FileQueue + DI_DIDCOMPAT DI_FLAGS = 0x00000010 // Searched for compatible devices + DI_DIDCLASS DI_FLAGS = 0x00000020 // Searched for class devices + DI_AUTOASSIGNRES DI_FLAGS = 0x00000040 // No UI for resources if possible + + // Flags returned by DiInstallDevice to indicate need to reboot/restart + DI_NEEDRESTART DI_FLAGS = 0x00000080 // Reboot required to take effect + DI_NEEDREBOOT DI_FLAGS = 0x00000100 // "" + + // Flags for device installation + DI_NOBROWSE DI_FLAGS = 0x00000200 // no Browse... in InsertDisk + + // Flags set by DiBuildDriverInfoList + DI_MULTMFGS DI_FLAGS = 0x00000400 // Set if multiple manufacturers in class driver list + + // Flag indicates that device is disabled + DI_DISABLED DI_FLAGS = 0x00000800 // Set if device disabled + + // Flags for Device/Class Properties + DI_GENERALPAGE_ADDED DI_FLAGS = 0x00001000 + DI_RESOURCEPAGE_ADDED DI_FLAGS = 0x00002000 + + // Flag to indicate that the setting properties for this Device (or class) caused a change so the Dev Mgr UI probably needs to be updated. + DI_PROPERTIES_CHANGE DI_FLAGS = 0x00004000 + + // Flag to indicate that the sorting from the INF file should be used. + DI_INF_IS_SORTED DI_FLAGS = 0x00008000 + + // Flag to indicate that only the INF specified by SP_DEVINSTALL_PARAMS.DriverPath should be searched. + DI_ENUMSINGLEINF DI_FLAGS = 0x00010000 + + // Flag that prevents ConfigMgr from removing/re-enumerating devices during device + // registration, installation, and deletion. + DI_DONOTCALLCONFIGMG DI_FLAGS = 0x00020000 + + // The following flag can be used to install a device disabled + DI_INSTALLDISABLED DI_FLAGS = 0x00040000 + + // Flag that causes SetupDiBuildDriverInfoList to build a device's compatible driver + // list from its existing class driver list, instead of the normal INF search. + DI_COMPAT_FROM_CLASS DI_FLAGS = 0x00080000 + + // This flag is set if the Class Install params should be used. 
+ DI_CLASSINSTALLPARAMS DI_FLAGS = 0x00100000 + + // This flag is set if the caller of DiCallClassInstaller does NOT want the internal default action performed if the Class installer returns ERROR_DI_DO_DEFAULT. + DI_NODI_DEFAULTACTION DI_FLAGS = 0x00200000 + + // Flags for device installation + DI_QUIETINSTALL DI_FLAGS = 0x00800000 // don't confuse the user with questions or excess info + DI_NOFILECOPY DI_FLAGS = 0x01000000 // No file Copy necessary + DI_FORCECOPY DI_FLAGS = 0x02000000 // Force files to be copied from install path + DI_DRIVERPAGE_ADDED DI_FLAGS = 0x04000000 // Prop provider added Driver page. + DI_USECI_SELECTSTRINGS DI_FLAGS = 0x08000000 // Use Class Installer Provided strings in the Select Device Dlg + DI_OVERRIDE_INFFLAGS DI_FLAGS = 0x10000000 // Override INF flags + DI_PROPS_NOCHANGEUSAGE DI_FLAGS = 0x20000000 // No Enable/Disable in General Props + + DI_NOSELECTICONS DI_FLAGS = 0x40000000 // No small icons in select device dialogs + + DI_NOWRITE_IDS DI_FLAGS = 0x80000000 // Don't write HW & Compat IDs on install +) + +// DI_FLAGSEX is SP_DEVINSTALL_PARAMS.FlagsEx values +type DI_FLAGSEX uint32 + +const ( + DI_FLAGSEX_CI_FAILED DI_FLAGSEX = 0x00000004 // Failed to Load/Call class installer + DI_FLAGSEX_FINISHINSTALL_ACTION DI_FLAGSEX = 0x00000008 // Class/co-installer wants to get a DIF_FINISH_INSTALL action in client context. + DI_FLAGSEX_DIDINFOLIST DI_FLAGSEX = 0x00000010 // Did the Class Info List + DI_FLAGSEX_DIDCOMPATINFO DI_FLAGSEX = 0x00000020 // Did the Compat Info List + DI_FLAGSEX_FILTERCLASSES DI_FLAGSEX = 0x00000040 + DI_FLAGSEX_SETFAILEDINSTALL DI_FLAGSEX = 0x00000080 + DI_FLAGSEX_DEVICECHANGE DI_FLAGSEX = 0x00000100 + DI_FLAGSEX_ALWAYSWRITEIDS DI_FLAGSEX = 0x00000200 + DI_FLAGSEX_PROPCHANGE_PENDING DI_FLAGSEX = 0x00000400 // One or more device property sheets have had changes made to them, and need to have a DIF_PROPERTYCHANGE occur. + DI_FLAGSEX_ALLOWEXCLUDEDDRVS DI_FLAGSEX = 0x00000800 + DI_FLAGSEX_NOUIONQUERYREMOVE DI_FLAGSEX = 0x00001000 + DI_FLAGSEX_USECLASSFORCOMPAT DI_FLAGSEX = 0x00002000 // Use the device's class when building compat drv list. (Ignored if DI_COMPAT_FROM_CLASS flag is specified.) + DI_FLAGSEX_NO_DRVREG_MODIFY DI_FLAGSEX = 0x00008000 // Don't run AddReg and DelReg for device's software (driver) key. + DI_FLAGSEX_IN_SYSTEM_SETUP DI_FLAGSEX = 0x00010000 // Installation is occurring during initial system setup. + DI_FLAGSEX_INET_DRIVER DI_FLAGSEX = 0x00020000 // Driver came from Windows Update + DI_FLAGSEX_APPENDDRIVERLIST DI_FLAGSEX = 0x00040000 // Cause SetupDiBuildDriverInfoList to append a new driver list to an existing list. + DI_FLAGSEX_PREINSTALLBACKUP DI_FLAGSEX = 0x00080000 // not used + DI_FLAGSEX_BACKUPONREPLACE DI_FLAGSEX = 0x00100000 // not used + DI_FLAGSEX_DRIVERLIST_FROM_URL DI_FLAGSEX = 0x00200000 // build driver list from INF(s) retrieved from URL specified in SP_DEVINSTALL_PARAMS.DriverPath (empty string means Windows Update website) + DI_FLAGSEX_EXCLUDE_OLD_INET_DRIVERS DI_FLAGSEX = 0x00800000 // Don't include old Internet drivers when building a driver list. Ignored on Windows Vista and later. + DI_FLAGSEX_POWERPAGE_ADDED DI_FLAGSEX = 0x01000000 // class installer added their own power page + DI_FLAGSEX_FILTERSIMILARDRIVERS DI_FLAGSEX = 0x02000000 // only include similar drivers in class list + DI_FLAGSEX_INSTALLEDDRIVER DI_FLAGSEX = 0x04000000 // only add the installed driver to the class or compat driver list. 
Used in calls to SetupDiBuildDriverInfoList + DI_FLAGSEX_NO_CLASSLIST_NODE_MERGE DI_FLAGSEX = 0x08000000 // Don't remove identical driver nodes from the class list + DI_FLAGSEX_ALTPLATFORM_DRVSEARCH DI_FLAGSEX = 0x10000000 // Build driver list based on alternate platform information specified in associated file queue + DI_FLAGSEX_RESTART_DEVICE_ONLY DI_FLAGSEX = 0x20000000 // only restart the device drivers are being installed on as opposed to restarting all devices using those drivers. + DI_FLAGSEX_RECURSIVESEARCH DI_FLAGSEX = 0x40000000 // Tell SetupDiBuildDriverInfoList to do a recursive search + DI_FLAGSEX_SEARCH_PUBLISHED_INFS DI_FLAGSEX = 0x80000000 // Tell SetupDiBuildDriverInfoList to do a "published INF" search +) + +// ClassInstallHeader is the first member of any class install parameters structure. It contains the device installation request code that defines the format of the rest of the install parameters structure. +type ClassInstallHeader struct { + size uint32 + InstallFunction DI_FUNCTION +} + +func MakeClassInstallHeader(installFunction DI_FUNCTION) *ClassInstallHeader { + hdr := &ClassInstallHeader{InstallFunction: installFunction} + hdr.size = uint32(unsafe.Sizeof(*hdr)) + return hdr +} + +// DICS_STATE specifies values indicating a change in a device's state +type DICS_STATE uint32 + +const ( + DICS_ENABLE DICS_STATE = 0x00000001 // The device is being enabled. + DICS_DISABLE DICS_STATE = 0x00000002 // The device is being disabled. + DICS_PROPCHANGE DICS_STATE = 0x00000003 // The properties of the device have changed. + DICS_START DICS_STATE = 0x00000004 // The device is being started (if the request is for the currently active hardware profile). + DICS_STOP DICS_STATE = 0x00000005 // The device is being stopped. The driver stack will be unloaded and the CSCONFIGFLAG_DO_NOT_START flag will be set for the device. +) + +// DICS_FLAG specifies the scope of a device property change +type DICS_FLAG uint32 + +const ( + DICS_FLAG_GLOBAL DICS_FLAG = 0x00000001 // make change in all hardware profiles + DICS_FLAG_CONFIGSPECIFIC DICS_FLAG = 0x00000002 // make change in specified profile only + DICS_FLAG_CONFIGGENERAL DICS_FLAG = 0x00000004 // 1 or more hardware profile-specific changes to follow (obsolete) +) + +// PropChangeParams is a structure corresponding to a DIF_PROPERTYCHANGE install function. +type PropChangeParams struct { + ClassInstallHeader ClassInstallHeader + StateChange DICS_STATE + Scope DICS_FLAG + HwProfile uint32 +} + +// DI_REMOVEDEVICE specifies the scope of the device removal +type DI_REMOVEDEVICE uint32 + +const ( + DI_REMOVEDEVICE_GLOBAL DI_REMOVEDEVICE = 0x00000001 // Make this change in all hardware profiles. Remove information about the device from the registry. + DI_REMOVEDEVICE_CONFIGSPECIFIC DI_REMOVEDEVICE = 0x00000002 // Make this change to only the hardware profile specified by HwProfile. this flag only applies to root-enumerated devices. When Windows removes the device from the last hardware profile in which it was configured, Windows performs a global removal. +) + +// RemoveDeviceParams is a structure corresponding to a DIF_REMOVE install function. 
+type RemoveDeviceParams struct { + ClassInstallHeader ClassInstallHeader + Scope DI_REMOVEDEVICE + HwProfile uint32 +} + +// DrvInfoData is driver information structure (member of a driver info list that may be associated with a particular device instance, or (globally) with a device information set) +type DrvInfoData struct { + size uint32 + DriverType uint32 + _ uintptr + description [LINE_LEN]uint16 + mfgName [LINE_LEN]uint16 + providerName [LINE_LEN]uint16 + DriverDate Filetime + DriverVersion uint64 +} + +func (data *DrvInfoData) Description() string { + return UTF16ToString(data.description[:]) +} + +func (data *DrvInfoData) SetDescription(description string) error { + str, err := UTF16FromString(description) + if err != nil { + return err + } + copy(data.description[:], str) + return nil +} + +func (data *DrvInfoData) MfgName() string { + return UTF16ToString(data.mfgName[:]) +} + +func (data *DrvInfoData) SetMfgName(mfgName string) error { + str, err := UTF16FromString(mfgName) + if err != nil { + return err + } + copy(data.mfgName[:], str) + return nil +} + +func (data *DrvInfoData) ProviderName() string { + return UTF16ToString(data.providerName[:]) +} + +func (data *DrvInfoData) SetProviderName(providerName string) error { + str, err := UTF16FromString(providerName) + if err != nil { + return err + } + copy(data.providerName[:], str) + return nil +} + +// IsNewer method returns true if DrvInfoData date and version is newer than supplied parameters. +func (data *DrvInfoData) IsNewer(driverDate Filetime, driverVersion uint64) bool { + if data.DriverDate.HighDateTime > driverDate.HighDateTime { + return true + } + if data.DriverDate.HighDateTime < driverDate.HighDateTime { + return false + } + + if data.DriverDate.LowDateTime > driverDate.LowDateTime { + return true + } + if data.DriverDate.LowDateTime < driverDate.LowDateTime { + return false + } + + if data.DriverVersion > driverVersion { + return true + } + if data.DriverVersion < driverVersion { + return false + } + + return false +} + +// DrvInfoDetailData is driver information details structure (provides detailed information about a particular driver information structure) +type DrvInfoDetailData struct { + size uint32 // Use unsafeSizeOf method + InfDate Filetime + compatIDsOffset uint32 + compatIDsLength uint32 + _ uintptr + sectionName [LINE_LEN]uint16 + infFileName [MAX_PATH]uint16 + drvDescription [LINE_LEN]uint16 + hardwareID [1]uint16 +} + +func (*DrvInfoDetailData) unsafeSizeOf() uint32 { + if unsafe.Sizeof(uintptr(0)) == 4 { + // Windows declares this with pshpack1.h + return uint32(unsafe.Offsetof(DrvInfoDetailData{}.hardwareID) + unsafe.Sizeof(DrvInfoDetailData{}.hardwareID)) + } + return uint32(unsafe.Sizeof(DrvInfoDetailData{})) +} + +func (data *DrvInfoDetailData) SectionName() string { + return UTF16ToString(data.sectionName[:]) +} + +func (data *DrvInfoDetailData) InfFileName() string { + return UTF16ToString(data.infFileName[:]) +} + +func (data *DrvInfoDetailData) DrvDescription() string { + return UTF16ToString(data.drvDescription[:]) +} + +func (data *DrvInfoDetailData) HardwareID() string { + if data.compatIDsOffset > 1 { + bufW := data.getBuf() + return UTF16ToString(bufW[:wcslen(bufW)]) + } + + return "" +} + +func (data *DrvInfoDetailData) CompatIDs() []string { + a := make([]string, 0) + + if data.compatIDsLength > 0 { + bufW := data.getBuf() + bufW = bufW[data.compatIDsOffset : data.compatIDsOffset+data.compatIDsLength] + for i := 0; i < len(bufW); { + j := i + wcslen(bufW[i:]) + if i < j { + a = 
append(a, UTF16ToString(bufW[i:j])) + } + i = j + 1 + } + } + + return a +} + +func (data *DrvInfoDetailData) getBuf() []uint16 { + len := (data.size - uint32(unsafe.Offsetof(data.hardwareID))) / 2 + sl := struct { + addr *uint16 + len int + cap int + }{&data.hardwareID[0], int(len), int(len)} + return *(*[]uint16)(unsafe.Pointer(&sl)) +} + +// IsCompatible method tests if given hardware ID matches the driver or is listed on the compatible ID list. +func (data *DrvInfoDetailData) IsCompatible(hwid string) bool { + hwidLC := strings.ToLower(hwid) + if strings.ToLower(data.HardwareID()) == hwidLC { + return true + } + a := data.CompatIDs() + for i := range a { + if strings.ToLower(a[i]) == hwidLC { + return true + } + } + + return false +} + +// DICD flags control SetupDiCreateDeviceInfo +type DICD uint32 + +const ( + DICD_GENERATE_ID DICD = 0x00000001 + DICD_INHERIT_CLASSDRVS DICD = 0x00000002 +) + +// SUOI flags control SetupUninstallOEMInf +type SUOI uint32 + +const ( + SUOI_FORCEDELETE SUOI = 0x0001 +) + +// SPDIT flags to distinguish between class drivers and +// device drivers. (Passed in 'DriverType' parameter of +// driver information list APIs) +type SPDIT uint32 + +const ( + SPDIT_NODRIVER SPDIT = 0x00000000 + SPDIT_CLASSDRIVER SPDIT = 0x00000001 + SPDIT_COMPATDRIVER SPDIT = 0x00000002 +) + +// DIGCF flags control what is included in the device information set built by SetupDiGetClassDevs +type DIGCF uint32 + +const ( + DIGCF_DEFAULT DIGCF = 0x00000001 // only valid with DIGCF_DEVICEINTERFACE + DIGCF_PRESENT DIGCF = 0x00000002 + DIGCF_ALLCLASSES DIGCF = 0x00000004 + DIGCF_PROFILE DIGCF = 0x00000008 + DIGCF_DEVICEINTERFACE DIGCF = 0x00000010 +) + +// DIREG specifies values for SetupDiCreateDevRegKey, SetupDiOpenDevRegKey, and SetupDiDeleteDevRegKey. +type DIREG uint32 + +const ( + DIREG_DEV DIREG = 0x00000001 // Open/Create/Delete device key + DIREG_DRV DIREG = 0x00000002 // Open/Create/Delete driver key + DIREG_BOTH DIREG = 0x00000004 // Delete both driver and Device key +) + +// SPDRP specifies device registry property codes +// (Codes marked as read-only (R) may only be used for +// SetupDiGetDeviceRegistryProperty) +// +// These values should cover the same set of registry properties +// as defined by the CM_DRP codes in cfgmgr32.h. +// +// Note that SPDRP codes are zero based while CM_DRP codes are one based! 
+type SPDRP uint32 + +const ( + SPDRP_DEVICEDESC SPDRP = 0x00000000 // DeviceDesc (R/W) + SPDRP_HARDWAREID SPDRP = 0x00000001 // HardwareID (R/W) + SPDRP_COMPATIBLEIDS SPDRP = 0x00000002 // CompatibleIDs (R/W) + SPDRP_SERVICE SPDRP = 0x00000004 // Service (R/W) + SPDRP_CLASS SPDRP = 0x00000007 // Class (R--tied to ClassGUID) + SPDRP_CLASSGUID SPDRP = 0x00000008 // ClassGUID (R/W) + SPDRP_DRIVER SPDRP = 0x00000009 // Driver (R/W) + SPDRP_CONFIGFLAGS SPDRP = 0x0000000A // ConfigFlags (R/W) + SPDRP_MFG SPDRP = 0x0000000B // Mfg (R/W) + SPDRP_FRIENDLYNAME SPDRP = 0x0000000C // FriendlyName (R/W) + SPDRP_LOCATION_INFORMATION SPDRP = 0x0000000D // LocationInformation (R/W) + SPDRP_PHYSICAL_DEVICE_OBJECT_NAME SPDRP = 0x0000000E // PhysicalDeviceObjectName (R) + SPDRP_CAPABILITIES SPDRP = 0x0000000F // Capabilities (R) + SPDRP_UI_NUMBER SPDRP = 0x00000010 // UiNumber (R) + SPDRP_UPPERFILTERS SPDRP = 0x00000011 // UpperFilters (R/W) + SPDRP_LOWERFILTERS SPDRP = 0x00000012 // LowerFilters (R/W) + SPDRP_BUSTYPEGUID SPDRP = 0x00000013 // BusTypeGUID (R) + SPDRP_LEGACYBUSTYPE SPDRP = 0x00000014 // LegacyBusType (R) + SPDRP_BUSNUMBER SPDRP = 0x00000015 // BusNumber (R) + SPDRP_ENUMERATOR_NAME SPDRP = 0x00000016 // Enumerator Name (R) + SPDRP_SECURITY SPDRP = 0x00000017 // Security (R/W, binary form) + SPDRP_SECURITY_SDS SPDRP = 0x00000018 // Security (W, SDS form) + SPDRP_DEVTYPE SPDRP = 0x00000019 // Device Type (R/W) + SPDRP_EXCLUSIVE SPDRP = 0x0000001A // Device is exclusive-access (R/W) + SPDRP_CHARACTERISTICS SPDRP = 0x0000001B // Device Characteristics (R/W) + SPDRP_ADDRESS SPDRP = 0x0000001C // Device Address (R) + SPDRP_UI_NUMBER_DESC_FORMAT SPDRP = 0x0000001D // UiNumberDescFormat (R/W) + SPDRP_DEVICE_POWER_DATA SPDRP = 0x0000001E // Device Power Data (R) + SPDRP_REMOVAL_POLICY SPDRP = 0x0000001F // Removal Policy (R) + SPDRP_REMOVAL_POLICY_HW_DEFAULT SPDRP = 0x00000020 // Hardware Removal Policy (R) + SPDRP_REMOVAL_POLICY_OVERRIDE SPDRP = 0x00000021 // Removal Policy Override (RW) + SPDRP_INSTALL_STATE SPDRP = 0x00000022 // Device Install State (R) + SPDRP_LOCATION_PATHS SPDRP = 0x00000023 // Device Location Paths (R) + SPDRP_BASE_CONTAINERID SPDRP = 0x00000024 // Base ContainerID (R) + + SPDRP_MAXIMUM_PROPERTY SPDRP = 0x00000025 // Upper bound on ordinals +) + +// DEVPROPTYPE represents the property-data-type identifier that specifies the +// data type of a device property value in the unified device property model. 
+type DEVPROPTYPE uint32 + +const ( + DEVPROP_TYPEMOD_ARRAY DEVPROPTYPE = 0x00001000 + DEVPROP_TYPEMOD_LIST DEVPROPTYPE = 0x00002000 + + DEVPROP_TYPE_EMPTY DEVPROPTYPE = 0x00000000 + DEVPROP_TYPE_NULL DEVPROPTYPE = 0x00000001 + DEVPROP_TYPE_SBYTE DEVPROPTYPE = 0x00000002 + DEVPROP_TYPE_BYTE DEVPROPTYPE = 0x00000003 + DEVPROP_TYPE_INT16 DEVPROPTYPE = 0x00000004 + DEVPROP_TYPE_UINT16 DEVPROPTYPE = 0x00000005 + DEVPROP_TYPE_INT32 DEVPROPTYPE = 0x00000006 + DEVPROP_TYPE_UINT32 DEVPROPTYPE = 0x00000007 + DEVPROP_TYPE_INT64 DEVPROPTYPE = 0x00000008 + DEVPROP_TYPE_UINT64 DEVPROPTYPE = 0x00000009 + DEVPROP_TYPE_FLOAT DEVPROPTYPE = 0x0000000A + DEVPROP_TYPE_DOUBLE DEVPROPTYPE = 0x0000000B + DEVPROP_TYPE_DECIMAL DEVPROPTYPE = 0x0000000C + DEVPROP_TYPE_GUID DEVPROPTYPE = 0x0000000D + DEVPROP_TYPE_CURRENCY DEVPROPTYPE = 0x0000000E + DEVPROP_TYPE_DATE DEVPROPTYPE = 0x0000000F + DEVPROP_TYPE_FILETIME DEVPROPTYPE = 0x00000010 + DEVPROP_TYPE_BOOLEAN DEVPROPTYPE = 0x00000011 + DEVPROP_TYPE_STRING DEVPROPTYPE = 0x00000012 + DEVPROP_TYPE_STRING_LIST DEVPROPTYPE = DEVPROP_TYPE_STRING | DEVPROP_TYPEMOD_LIST + DEVPROP_TYPE_SECURITY_DESCRIPTOR DEVPROPTYPE = 0x00000013 + DEVPROP_TYPE_SECURITY_DESCRIPTOR_STRING DEVPROPTYPE = 0x00000014 + DEVPROP_TYPE_DEVPROPKEY DEVPROPTYPE = 0x00000015 + DEVPROP_TYPE_DEVPROPTYPE DEVPROPTYPE = 0x00000016 + DEVPROP_TYPE_BINARY DEVPROPTYPE = DEVPROP_TYPE_BYTE | DEVPROP_TYPEMOD_ARRAY + DEVPROP_TYPE_ERROR DEVPROPTYPE = 0x00000017 + DEVPROP_TYPE_NTSTATUS DEVPROPTYPE = 0x00000018 + DEVPROP_TYPE_STRING_INDIRECT DEVPROPTYPE = 0x00000019 + + MAX_DEVPROP_TYPE DEVPROPTYPE = 0x00000019 + MAX_DEVPROP_TYPEMOD DEVPROPTYPE = 0x00002000 + + DEVPROP_MASK_TYPE DEVPROPTYPE = 0x00000FFF + DEVPROP_MASK_TYPEMOD DEVPROPTYPE = 0x0000F000 +) + +// DEVPROPGUID specifies a property category. +type DEVPROPGUID GUID + +// DEVPROPID uniquely identifies the property within the property category. +type DEVPROPID uint32 + +const DEVPROPID_FIRST_USABLE DEVPROPID = 2 + +// DEVPROPKEY represents a device property key for a device property in the +// unified device property model. 
+type DEVPROPKEY struct { + FmtID DEVPROPGUID + PID DEVPROPID +} + +// CONFIGRET is a return value or error code from cfgmgr32 APIs +type CONFIGRET uint32 + +func (ret CONFIGRET) Error() string { + if win32Error, ok := ret.Unwrap().(Errno); ok { + return fmt.Sprintf("%s (CfgMgr error: 0x%08x)", win32Error.Error(), uint32(ret)) + } + return fmt.Sprintf("CfgMgr error: 0x%08x", uint32(ret)) +} + +func (ret CONFIGRET) Win32Error(defaultError Errno) Errno { + return cm_MapCrToWin32Err(ret, defaultError) +} + +func (ret CONFIGRET) Unwrap() error { + const noMatch = Errno(^uintptr(0)) + win32Error := ret.Win32Error(noMatch) + if win32Error == noMatch { + return nil + } + return win32Error +} + +const ( + CR_SUCCESS CONFIGRET = 0x00000000 + CR_DEFAULT CONFIGRET = 0x00000001 + CR_OUT_OF_MEMORY CONFIGRET = 0x00000002 + CR_INVALID_POINTER CONFIGRET = 0x00000003 + CR_INVALID_FLAG CONFIGRET = 0x00000004 + CR_INVALID_DEVNODE CONFIGRET = 0x00000005 + CR_INVALID_DEVINST = CR_INVALID_DEVNODE + CR_INVALID_RES_DES CONFIGRET = 0x00000006 + CR_INVALID_LOG_CONF CONFIGRET = 0x00000007 + CR_INVALID_ARBITRATOR CONFIGRET = 0x00000008 + CR_INVALID_NODELIST CONFIGRET = 0x00000009 + CR_DEVNODE_HAS_REQS CONFIGRET = 0x0000000A + CR_DEVINST_HAS_REQS = CR_DEVNODE_HAS_REQS + CR_INVALID_RESOURCEID CONFIGRET = 0x0000000B + CR_DLVXD_NOT_FOUND CONFIGRET = 0x0000000C + CR_NO_SUCH_DEVNODE CONFIGRET = 0x0000000D + CR_NO_SUCH_DEVINST = CR_NO_SUCH_DEVNODE + CR_NO_MORE_LOG_CONF CONFIGRET = 0x0000000E + CR_NO_MORE_RES_DES CONFIGRET = 0x0000000F + CR_ALREADY_SUCH_DEVNODE CONFIGRET = 0x00000010 + CR_ALREADY_SUCH_DEVINST = CR_ALREADY_SUCH_DEVNODE + CR_INVALID_RANGE_LIST CONFIGRET = 0x00000011 + CR_INVALID_RANGE CONFIGRET = 0x00000012 + CR_FAILURE CONFIGRET = 0x00000013 + CR_NO_SUCH_LOGICAL_DEV CONFIGRET = 0x00000014 + CR_CREATE_BLOCKED CONFIGRET = 0x00000015 + CR_NOT_SYSTEM_VM CONFIGRET = 0x00000016 + CR_REMOVE_VETOED CONFIGRET = 0x00000017 + CR_APM_VETOED CONFIGRET = 0x00000018 + CR_INVALID_LOAD_TYPE CONFIGRET = 0x00000019 + CR_BUFFER_SMALL CONFIGRET = 0x0000001A + CR_NO_ARBITRATOR CONFIGRET = 0x0000001B + CR_NO_REGISTRY_HANDLE CONFIGRET = 0x0000001C + CR_REGISTRY_ERROR CONFIGRET = 0x0000001D + CR_INVALID_DEVICE_ID CONFIGRET = 0x0000001E + CR_INVALID_DATA CONFIGRET = 0x0000001F + CR_INVALID_API CONFIGRET = 0x00000020 + CR_DEVLOADER_NOT_READY CONFIGRET = 0x00000021 + CR_NEED_RESTART CONFIGRET = 0x00000022 + CR_NO_MORE_HW_PROFILES CONFIGRET = 0x00000023 + CR_DEVICE_NOT_THERE CONFIGRET = 0x00000024 + CR_NO_SUCH_VALUE CONFIGRET = 0x00000025 + CR_WRONG_TYPE CONFIGRET = 0x00000026 + CR_INVALID_PRIORITY CONFIGRET = 0x00000027 + CR_NOT_DISABLEABLE CONFIGRET = 0x00000028 + CR_FREE_RESOURCES CONFIGRET = 0x00000029 + CR_QUERY_VETOED CONFIGRET = 0x0000002A + CR_CANT_SHARE_IRQ CONFIGRET = 0x0000002B + CR_NO_DEPENDENT CONFIGRET = 0x0000002C + CR_SAME_RESOURCES CONFIGRET = 0x0000002D + CR_NO_SUCH_REGISTRY_KEY CONFIGRET = 0x0000002E + CR_INVALID_MACHINENAME CONFIGRET = 0x0000002F + CR_REMOTE_COMM_FAILURE CONFIGRET = 0x00000030 + CR_MACHINE_UNAVAILABLE CONFIGRET = 0x00000031 + CR_NO_CM_SERVICES CONFIGRET = 0x00000032 + CR_ACCESS_DENIED CONFIGRET = 0x00000033 + CR_CALL_NOT_IMPLEMENTED CONFIGRET = 0x00000034 + CR_INVALID_PROPERTY CONFIGRET = 0x00000035 + CR_DEVICE_INTERFACE_ACTIVE CONFIGRET = 0x00000036 + CR_NO_SUCH_DEVICE_INTERFACE CONFIGRET = 0x00000037 + CR_INVALID_REFERENCE_STRING CONFIGRET = 0x00000038 + CR_INVALID_CONFLICT_LIST CONFIGRET = 0x00000039 + CR_INVALID_INDEX CONFIGRET = 0x0000003A + CR_INVALID_STRUCTURE_SIZE CONFIGRET = 0x0000003B + 
NUM_CR_RESULTS CONFIGRET = 0x0000003C +) + +const ( + CM_GET_DEVICE_INTERFACE_LIST_PRESENT = 0 // only currently 'live' device interfaces + CM_GET_DEVICE_INTERFACE_LIST_ALL_DEVICES = 1 // all registered device interfaces, live or not +) + +const ( + DN_ROOT_ENUMERATED = 0x00000001 // Was enumerated by ROOT + DN_DRIVER_LOADED = 0x00000002 // Has Register_Device_Driver + DN_ENUM_LOADED = 0x00000004 // Has Register_Enumerator + DN_STARTED = 0x00000008 // Is currently configured + DN_MANUAL = 0x00000010 // Manually installed + DN_NEED_TO_ENUM = 0x00000020 // May need reenumeration + DN_NOT_FIRST_TIME = 0x00000040 // Has received a config + DN_HARDWARE_ENUM = 0x00000080 // Enum generates hardware ID + DN_LIAR = 0x00000100 // Lied about can reconfig once + DN_HAS_MARK = 0x00000200 // Not CM_Create_DevInst lately + DN_HAS_PROBLEM = 0x00000400 // Need device installer + DN_FILTERED = 0x00000800 // Is filtered + DN_MOVED = 0x00001000 // Has been moved + DN_DISABLEABLE = 0x00002000 // Can be disabled + DN_REMOVABLE = 0x00004000 // Can be removed + DN_PRIVATE_PROBLEM = 0x00008000 // Has a private problem + DN_MF_PARENT = 0x00010000 // Multi function parent + DN_MF_CHILD = 0x00020000 // Multi function child + DN_WILL_BE_REMOVED = 0x00040000 // DevInst is being removed + DN_NOT_FIRST_TIMEE = 0x00080000 // Has received a config enumerate + DN_STOP_FREE_RES = 0x00100000 // When child is stopped, free resources + DN_REBAL_CANDIDATE = 0x00200000 // Don't skip during rebalance + DN_BAD_PARTIAL = 0x00400000 // This devnode's log_confs do not have the same resources + DN_NT_ENUMERATOR = 0x00800000 // This devnode is an NT enumerator + DN_NT_DRIVER = 0x01000000 // This devnode is an NT driver + DN_NEEDS_LOCKING = 0x02000000 // Devnode needs lock resume processing + DN_ARM_WAKEUP = 0x04000000 // Devnode can be the wakeup device + DN_APM_ENUMERATOR = 0x08000000 // APM aware enumerator + DN_APM_DRIVER = 0x10000000 // APM aware driver + DN_SILENT_INSTALL = 0x20000000 // Silent install + DN_NO_SHOW_IN_DM = 0x40000000 // No show in device manager + DN_BOOT_LOG_PROB = 0x80000000 // Had a problem during preassignment of boot log conf + DN_NEED_RESTART = DN_LIAR // System needs to be restarted for this Devnode to work properly + DN_DRIVER_BLOCKED = DN_NOT_FIRST_TIME // One or more drivers are blocked from loading for this Devnode + DN_LEGACY_DRIVER = DN_MOVED // This device is using a legacy driver + DN_CHILD_WITH_INVALID_ID = DN_HAS_MARK // One or more children have invalid IDs + DN_DEVICE_DISCONNECTED = DN_NEEDS_LOCKING // The function driver for a device reported that the device is not connected. Typically this means a wireless device is out of range. 
+ DN_QUERY_REMOVE_PENDING = DN_MF_PARENT // Device is part of a set of related devices collectively pending query-removal + DN_QUERY_REMOVE_ACTIVE = DN_MF_CHILD // Device is actively engaged in a query-remove IRP + DN_CHANGEABLE_FLAGS = DN_NOT_FIRST_TIME | DN_HARDWARE_ENUM | DN_HAS_MARK | DN_DISABLEABLE | DN_REMOVABLE | DN_MF_CHILD | DN_MF_PARENT | DN_NOT_FIRST_TIMEE | DN_STOP_FREE_RES | DN_REBAL_CANDIDATE | DN_NT_ENUMERATOR | DN_NT_DRIVER | DN_SILENT_INSTALL | DN_NO_SHOW_IN_DM +) + +//sys setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName *uint16, reserved uintptr) (handle DevInfo, err error) [failretval==DevInfo(InvalidHandle)] = setupapi.SetupDiCreateDeviceInfoListExW + +// SetupDiCreateDeviceInfoListEx function creates an empty device information set on a remote or a local computer and optionally associates the set with a device setup class. +func SetupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName string) (deviceInfoSet DevInfo, err error) { + var machineNameUTF16 *uint16 + if machineName != "" { + machineNameUTF16, err = UTF16PtrFromString(machineName) + if err != nil { + return + } + } + return setupDiCreateDeviceInfoListEx(classGUID, hwndParent, machineNameUTF16, 0) +} + +//sys setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailData *DevInfoListDetailData) (err error) = setupapi.SetupDiGetDeviceInfoListDetailW + +// SetupDiGetDeviceInfoListDetail function retrieves information associated with a device information set including the class GUID, remote computer handle, and remote computer name. +func SetupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo) (deviceInfoSetDetailData *DevInfoListDetailData, err error) { + data := &DevInfoListDetailData{} + data.size = data.unsafeSizeOf() + + return data, setupDiGetDeviceInfoListDetail(deviceInfoSet, data) +} + +// DeviceInfoListDetail method retrieves information associated with a device information set including the class GUID, remote computer handle, and remote computer name. +func (deviceInfoSet DevInfo) DeviceInfoListDetail() (*DevInfoListDetailData, error) { + return SetupDiGetDeviceInfoListDetail(deviceInfoSet) +} + +//sys setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUID *GUID, DeviceDescription *uint16, hwndParent uintptr, CreationFlags DICD, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiCreateDeviceInfoW + +// SetupDiCreateDeviceInfo function creates a new device information element and adds it as a new member to the specified device information set. +func SetupDiCreateDeviceInfo(deviceInfoSet DevInfo, deviceName string, classGUID *GUID, deviceDescription string, hwndParent uintptr, creationFlags DICD) (deviceInfoData *DevInfoData, err error) { + deviceNameUTF16, err := UTF16PtrFromString(deviceName) + if err != nil { + return + } + + var deviceDescriptionUTF16 *uint16 + if deviceDescription != "" { + deviceDescriptionUTF16, err = UTF16PtrFromString(deviceDescription) + if err != nil { + return + } + } + + data := &DevInfoData{} + data.size = uint32(unsafe.Sizeof(*data)) + + return data, setupDiCreateDeviceInfo(deviceInfoSet, deviceNameUTF16, classGUID, deviceDescriptionUTF16, hwndParent, creationFlags, data) +} + +// CreateDeviceInfo method creates a new device information element and adds it as a new member to the specified device information set. 
+func (deviceInfoSet DevInfo) CreateDeviceInfo(deviceName string, classGUID *GUID, deviceDescription string, hwndParent uintptr, creationFlags DICD) (*DevInfoData, error) { + return SetupDiCreateDeviceInfo(deviceInfoSet, deviceName, classGUID, deviceDescription, hwndParent, creationFlags) +} + +//sys setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiEnumDeviceInfo + +// SetupDiEnumDeviceInfo function returns a DevInfoData structure that specifies a device information element in a device information set. +func SetupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex int) (*DevInfoData, error) { + data := &DevInfoData{} + data.size = uint32(unsafe.Sizeof(*data)) + + return data, setupDiEnumDeviceInfo(deviceInfoSet, uint32(memberIndex), data) +} + +// EnumDeviceInfo method returns a DevInfoData structure that specifies a device information element in a device information set. +func (deviceInfoSet DevInfo) EnumDeviceInfo(memberIndex int) (*DevInfoData, error) { + return SetupDiEnumDeviceInfo(deviceInfoSet, memberIndex) +} + +// SetupDiDestroyDeviceInfoList function deletes a device information set and frees all associated memory. +//sys SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) = setupapi.SetupDiDestroyDeviceInfoList + +// Close method deletes a device information set and frees all associated memory. +func (deviceInfoSet DevInfo) Close() error { + return SetupDiDestroyDeviceInfoList(deviceInfoSet) +} + +//sys SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) = setupapi.SetupDiBuildDriverInfoList + +// BuildDriverInfoList method builds a list of drivers that is associated with a specific device or with the global class driver list for a device information set. +func (deviceInfoSet DevInfo) BuildDriverInfoList(deviceInfoData *DevInfoData, driverType SPDIT) error { + return SetupDiBuildDriverInfoList(deviceInfoSet, deviceInfoData, driverType) +} + +//sys SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) = setupapi.SetupDiCancelDriverInfoSearch + +// CancelDriverInfoSearch method cancels a driver list search that is currently in progress in a different thread. +func (deviceInfoSet DevInfo) CancelDriverInfoSearch() error { + return SetupDiCancelDriverInfoSearch(deviceInfoSet) +} + +//sys setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex uint32, driverInfoData *DrvInfoData) (err error) = setupapi.SetupDiEnumDriverInfoW + +// SetupDiEnumDriverInfo function enumerates the members of a driver list. +func SetupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex int) (*DrvInfoData, error) { + data := &DrvInfoData{} + data.size = uint32(unsafe.Sizeof(*data)) + + return data, setupDiEnumDriverInfo(deviceInfoSet, deviceInfoData, driverType, uint32(memberIndex), data) +} + +// EnumDriverInfo method enumerates the members of a driver list. 
+func (deviceInfoSet DevInfo) EnumDriverInfo(deviceInfoData *DevInfoData, driverType SPDIT, memberIndex int) (*DrvInfoData, error) { + return SetupDiEnumDriverInfo(deviceInfoSet, deviceInfoData, driverType, memberIndex) +} + +//sys setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) = setupapi.SetupDiGetSelectedDriverW + +// SetupDiGetSelectedDriver function retrieves the selected driver for a device information set or a particular device information element. +func SetupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (*DrvInfoData, error) { + data := &DrvInfoData{} + data.size = uint32(unsafe.Sizeof(*data)) + + return data, setupDiGetSelectedDriver(deviceInfoSet, deviceInfoData, data) +} + +// SelectedDriver method retrieves the selected driver for a device information set or a particular device information element. +func (deviceInfoSet DevInfo) SelectedDriver(deviceInfoData *DevInfoData) (*DrvInfoData, error) { + return SetupDiGetSelectedDriver(deviceInfoSet, deviceInfoData) +} + +//sys SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) = setupapi.SetupDiSetSelectedDriverW + +// SetSelectedDriver method sets, or resets, the selected driver for a device information element or the selected class driver for a device information set. +func (deviceInfoSet DevInfo) SetSelectedDriver(deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) error { + return SetupDiSetSelectedDriver(deviceInfoSet, deviceInfoData, driverInfoData) +} + +//sys setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData, driverInfoDetailData *DrvInfoDetailData, driverInfoDetailDataSize uint32, requiredSize *uint32) (err error) = setupapi.SetupDiGetDriverInfoDetailW + +// SetupDiGetDriverInfoDetail function retrieves driver information detail for a device information set or a particular device information element in the device information set. +func SetupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (*DrvInfoDetailData, error) { + reqSize := uint32(2048) + for { + buf := make([]byte, reqSize) + data := (*DrvInfoDetailData)(unsafe.Pointer(&buf[0])) + data.size = data.unsafeSizeOf() + err := setupDiGetDriverInfoDetail(deviceInfoSet, deviceInfoData, driverInfoData, data, uint32(len(buf)), &reqSize) + if err == ERROR_INSUFFICIENT_BUFFER { + continue + } + if err != nil { + return nil, err + } + data.size = reqSize + return data, nil + } +} + +// DriverInfoDetail method retrieves driver information detail for a device information set or a particular device information element in the device information set. +func (deviceInfoSet DevInfo) DriverInfoDetail(deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (*DrvInfoDetailData, error) { + return SetupDiGetDriverInfoDetail(deviceInfoSet, deviceInfoData, driverInfoData) +} + +//sys SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) = setupapi.SetupDiDestroyDriverInfoList + +// DestroyDriverInfoList method deletes a driver list. 
+func (deviceInfoSet DevInfo) DestroyDriverInfoList(deviceInfoData *DevInfoData, driverType SPDIT) error { + return SetupDiDestroyDriverInfoList(deviceInfoSet, deviceInfoData, driverType) +} + +//sys setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintptr, Flags DIGCF, deviceInfoSet DevInfo, machineName *uint16, reserved uintptr) (handle DevInfo, err error) [failretval==DevInfo(InvalidHandle)] = setupapi.SetupDiGetClassDevsExW + +// SetupDiGetClassDevsEx function returns a handle to a device information set that contains requested device information elements for a local or a remote computer. +func SetupDiGetClassDevsEx(classGUID *GUID, enumerator string, hwndParent uintptr, flags DIGCF, deviceInfoSet DevInfo, machineName string) (handle DevInfo, err error) { + var enumeratorUTF16 *uint16 + if enumerator != "" { + enumeratorUTF16, err = UTF16PtrFromString(enumerator) + if err != nil { + return + } + } + var machineNameUTF16 *uint16 + if machineName != "" { + machineNameUTF16, err = UTF16PtrFromString(machineName) + if err != nil { + return + } + } + return setupDiGetClassDevsEx(classGUID, enumeratorUTF16, hwndParent, flags, deviceInfoSet, machineNameUTF16, 0) +} + +// SetupDiCallClassInstaller function calls the appropriate class installer, and any registered co-installers, with the specified installation request (DIF code). +//sys SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiCallClassInstaller + +// CallClassInstaller member calls the appropriate class installer, and any registered co-installers, with the specified installation request (DIF code). +func (deviceInfoSet DevInfo) CallClassInstaller(installFunction DI_FUNCTION, deviceInfoData *DevInfoData) error { + return SetupDiCallClassInstaller(installFunction, deviceInfoSet, deviceInfoData) +} + +// SetupDiOpenDevRegKey function opens a registry key for device-specific configuration information. +//sys SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (key Handle, err error) [failretval==InvalidHandle] = setupapi.SetupDiOpenDevRegKey + +// OpenDevRegKey method opens a registry key for device-specific configuration information. +func (deviceInfoSet DevInfo) OpenDevRegKey(DeviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (Handle, error) { + return SetupDiOpenDevRegKey(deviceInfoSet, DeviceInfoData, Scope, HwProfile, KeyType, samDesired) +} + +//sys setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY, propertyType *DEVPROPTYPE, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32, flags uint32) (err error) = setupapi.SetupDiGetDevicePropertyW + +// SetupDiGetDeviceProperty function retrieves a specified device instance property. 
+func SetupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY) (value interface{}, err error) { + reqSize := uint32(256) + for { + var dataType DEVPROPTYPE + buf := make([]byte, reqSize) + err = setupDiGetDeviceProperty(deviceInfoSet, deviceInfoData, propertyKey, &dataType, &buf[0], uint32(len(buf)), &reqSize, 0) + if err == ERROR_INSUFFICIENT_BUFFER { + continue + } + if err != nil { + return + } + switch dataType { + case DEVPROP_TYPE_STRING: + ret := UTF16ToString(bufToUTF16(buf)) + runtime.KeepAlive(buf) + return ret, nil + } + return nil, errors.New("unimplemented property type") + } +} + +//sys setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyRegDataType *uint32, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32) (err error) = setupapi.SetupDiGetDeviceRegistryPropertyW + +// SetupDiGetDeviceRegistryProperty function retrieves a specified Plug and Play device property. +func SetupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP) (value interface{}, err error) { + reqSize := uint32(256) + for { + var dataType uint32 + buf := make([]byte, reqSize) + err = setupDiGetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property, &dataType, &buf[0], uint32(len(buf)), &reqSize) + if err == ERROR_INSUFFICIENT_BUFFER { + continue + } + if err != nil { + return + } + return getRegistryValue(buf[:reqSize], dataType) + } +} + +func getRegistryValue(buf []byte, dataType uint32) (interface{}, error) { + switch dataType { + case REG_SZ: + ret := UTF16ToString(bufToUTF16(buf)) + runtime.KeepAlive(buf) + return ret, nil + case REG_EXPAND_SZ: + value := UTF16ToString(bufToUTF16(buf)) + if value == "" { + return "", nil + } + p, err := syscall.UTF16PtrFromString(value) + if err != nil { + return "", err + } + ret := make([]uint16, 100) + for { + n, err := ExpandEnvironmentStrings(p, &ret[0], uint32(len(ret))) + if err != nil { + return "", err + } + if n <= uint32(len(ret)) { + return UTF16ToString(ret[:n]), nil + } + ret = make([]uint16, n) + } + case REG_BINARY: + return buf, nil + case REG_DWORD_LITTLE_ENDIAN: + return binary.LittleEndian.Uint32(buf), nil + case REG_DWORD_BIG_ENDIAN: + return binary.BigEndian.Uint32(buf), nil + case REG_MULTI_SZ: + bufW := bufToUTF16(buf) + a := []string{} + for i := 0; i < len(bufW); { + j := i + wcslen(bufW[i:]) + if i < j { + a = append(a, UTF16ToString(bufW[i:j])) + } + i = j + 1 + } + runtime.KeepAlive(buf) + return a, nil + case REG_QWORD_LITTLE_ENDIAN: + return binary.LittleEndian.Uint64(buf), nil + default: + return nil, fmt.Errorf("Unsupported registry value type: %v", dataType) + } +} + +// bufToUTF16 function reinterprets []byte buffer as []uint16 +func bufToUTF16(buf []byte) []uint16 { + sl := struct { + addr *uint16 + len int + cap int + }{(*uint16)(unsafe.Pointer(&buf[0])), len(buf) / 2, cap(buf) / 2} + return *(*[]uint16)(unsafe.Pointer(&sl)) +} + +// utf16ToBuf function reinterprets []uint16 as []byte +func utf16ToBuf(buf []uint16) []byte { + sl := struct { + addr *byte + len int + cap int + }{(*byte)(unsafe.Pointer(&buf[0])), len(buf) * 2, cap(buf) * 2} + return *(*[]byte)(unsafe.Pointer(&sl)) +} + +func wcslen(str []uint16) int { + for i := 0; i < len(str); i++ { + if str[i] == 0 { + return i + } + } + return len(str) +} + +// DeviceRegistryProperty method retrieves a specified Plug and Play device property. 
+func (deviceInfoSet DevInfo) DeviceRegistryProperty(deviceInfoData *DevInfoData, property SPDRP) (interface{}, error) { + return SetupDiGetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property) +} + +//sys setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffer *byte, propertyBufferSize uint32) (err error) = setupapi.SetupDiSetDeviceRegistryPropertyW + +// SetupDiSetDeviceRegistryProperty function sets a Plug and Play device property for a device. +func SetupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffers []byte) error { + return setupDiSetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property, &propertyBuffers[0], uint32(len(propertyBuffers))) +} + +// SetDeviceRegistryProperty function sets a Plug and Play device property for a device. +func (deviceInfoSet DevInfo) SetDeviceRegistryProperty(deviceInfoData *DevInfoData, property SPDRP, propertyBuffers []byte) error { + return SetupDiSetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property, propertyBuffers) +} + +// SetDeviceRegistryPropertyString method sets a Plug and Play device property string for a device. +func (deviceInfoSet DevInfo) SetDeviceRegistryPropertyString(deviceInfoData *DevInfoData, property SPDRP, str string) error { + str16, err := UTF16FromString(str) + if err != nil { + return err + } + err = SetupDiSetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property, utf16ToBuf(append(str16, 0))) + runtime.KeepAlive(str16) + return err +} + +//sys setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) = setupapi.SetupDiGetDeviceInstallParamsW + +// SetupDiGetDeviceInstallParams function retrieves device installation parameters for a device information set or a particular device information element. +func SetupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (*DevInstallParams, error) { + params := &DevInstallParams{} + params.size = uint32(unsafe.Sizeof(*params)) + + return params, setupDiGetDeviceInstallParams(deviceInfoSet, deviceInfoData, params) +} + +// DeviceInstallParams method retrieves device installation parameters for a device information set or a particular device information element. +func (deviceInfoSet DevInfo) DeviceInstallParams(deviceInfoData *DevInfoData) (*DevInstallParams, error) { + return SetupDiGetDeviceInstallParams(deviceInfoSet, deviceInfoData) +} + +//sys setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, instanceId *uint16, instanceIdSize uint32, instanceIdRequiredSize *uint32) (err error) = setupapi.SetupDiGetDeviceInstanceIdW + +// SetupDiGetDeviceInstanceId function retrieves the instance ID of the device. +func SetupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (string, error) { + reqSize := uint32(1024) + for { + buf := make([]uint16, reqSize) + err := setupDiGetDeviceInstanceId(deviceInfoSet, deviceInfoData, &buf[0], uint32(len(buf)), &reqSize) + if err == ERROR_INSUFFICIENT_BUFFER { + continue + } + if err != nil { + return "", err + } + return UTF16ToString(buf), nil + } +} + +// DeviceInstanceID method retrieves the instance ID of the device. 
+func (deviceInfoSet DevInfo) DeviceInstanceID(deviceInfoData *DevInfoData) (string, error) { + return SetupDiGetDeviceInstanceId(deviceInfoSet, deviceInfoData) +} + +// SetupDiGetClassInstallParams function retrieves class installation parameters for a device information set or a particular device information element. +//sys SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) (err error) = setupapi.SetupDiGetClassInstallParamsW + +// ClassInstallParams method retrieves class installation parameters for a device information set or a particular device information element. +func (deviceInfoSet DevInfo) ClassInstallParams(deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) error { + return SetupDiGetClassInstallParams(deviceInfoSet, deviceInfoData, classInstallParams, classInstallParamsSize, requiredSize) +} + +//sys SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) = setupapi.SetupDiSetDeviceInstallParamsW + +// SetDeviceInstallParams member sets device installation parameters for a device information set or a particular device information element. +func (deviceInfoSet DevInfo) SetDeviceInstallParams(deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) error { + return SetupDiSetDeviceInstallParams(deviceInfoSet, deviceInfoData, deviceInstallParams) +} + +// SetupDiSetClassInstallParams function sets or clears class install parameters for a device information set or a particular device information element. +//sys SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) (err error) = setupapi.SetupDiSetClassInstallParamsW + +// SetClassInstallParams method sets or clears class install parameters for a device information set or a particular device information element. +func (deviceInfoSet DevInfo) SetClassInstallParams(deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) error { + return SetupDiSetClassInstallParams(deviceInfoSet, deviceInfoData, classInstallParams, classInstallParamsSize) +} + +//sys setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) = setupapi.SetupDiClassNameFromGuidExW + +// SetupDiClassNameFromGuidEx function retrieves the class name associated with a class GUID. The class can be installed on a local or remote computer. +func SetupDiClassNameFromGuidEx(classGUID *GUID, machineName string) (className string, err error) { + var classNameUTF16 [MAX_CLASS_NAME_LEN]uint16 + + var machineNameUTF16 *uint16 + if machineName != "" { + machineNameUTF16, err = UTF16PtrFromString(machineName) + if err != nil { + return + } + } + + err = setupDiClassNameFromGuidEx(classGUID, &classNameUTF16[0], MAX_CLASS_NAME_LEN, nil, machineNameUTF16, 0) + if err != nil { + return + } + + className = UTF16ToString(classNameUTF16[:]) + return +} + +//sys setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGuidListSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) = setupapi.SetupDiClassGuidsFromNameExW + +// SetupDiClassGuidsFromNameEx function retrieves the GUIDs associated with the specified class name. 
This resulting list contains the classes currently installed on a local or remote computer. +func SetupDiClassGuidsFromNameEx(className string, machineName string) ([]GUID, error) { + classNameUTF16, err := UTF16PtrFromString(className) + if err != nil { + return nil, err + } + + var machineNameUTF16 *uint16 + if machineName != "" { + machineNameUTF16, err = UTF16PtrFromString(machineName) + if err != nil { + return nil, err + } + } + + reqSize := uint32(4) + for { + buf := make([]GUID, reqSize) + err = setupDiClassGuidsFromNameEx(classNameUTF16, &buf[0], uint32(len(buf)), &reqSize, machineNameUTF16, 0) + if err == ERROR_INSUFFICIENT_BUFFER { + continue + } + if err != nil { + return nil, err + } + return buf[:reqSize], nil + } +} + +//sys setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiGetSelectedDevice + +// SetupDiGetSelectedDevice function retrieves the selected device information element in a device information set. +func SetupDiGetSelectedDevice(deviceInfoSet DevInfo) (*DevInfoData, error) { + data := &DevInfoData{} + data.size = uint32(unsafe.Sizeof(*data)) + + return data, setupDiGetSelectedDevice(deviceInfoSet, data) +} + +// SelectedDevice method retrieves the selected device information element in a device information set. +func (deviceInfoSet DevInfo) SelectedDevice() (*DevInfoData, error) { + return SetupDiGetSelectedDevice(deviceInfoSet) +} + +// SetupDiSetSelectedDevice function sets a device information element as the selected member of a device information set. This function is typically used by an installation wizard. +//sys SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiSetSelectedDevice + +// SetSelectedDevice method sets a device information element as the selected member of a device information set. This function is typically used by an installation wizard. +func (deviceInfoSet DevInfo) SetSelectedDevice(deviceInfoData *DevInfoData) error { + return SetupDiSetSelectedDevice(deviceInfoSet, deviceInfoData) +} + +//sys setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (err error) = setupapi.SetupUninstallOEMInfW + +// SetupUninstallOEMInf uninstalls the specified driver. 
+func SetupUninstallOEMInf(infFileName string, flags SUOI) error { + infFileName16, err := UTF16PtrFromString(infFileName) + if err != nil { + return err + } + return setupUninstallOEMInf(infFileName16, flags, 0) +} + +//sys cm_MapCrToWin32Err(configRet CONFIGRET, defaultWin32Error Errno) (ret Errno) = CfgMgr32.CM_MapCrToWin32Err + +//sys cm_Get_Device_Interface_List_Size(len *uint32, interfaceClass *GUID, deviceID *uint16, flags uint32) (ret CONFIGRET) = CfgMgr32.CM_Get_Device_Interface_List_SizeW +//sys cm_Get_Device_Interface_List(interfaceClass *GUID, deviceID *uint16, buffer *uint16, bufferLen uint32, flags uint32) (ret CONFIGRET) = CfgMgr32.CM_Get_Device_Interface_ListW + +func CM_Get_Device_Interface_List(deviceID string, interfaceClass *GUID, flags uint32) ([]string, error) { + deviceID16, err := UTF16PtrFromString(deviceID) + if err != nil { + return nil, err + } + var buf []uint16 + var buflen uint32 + for { + if ret := cm_Get_Device_Interface_List_Size(&buflen, interfaceClass, deviceID16, flags); ret != CR_SUCCESS { + return nil, ret + } + buf = make([]uint16, buflen) + if ret := cm_Get_Device_Interface_List(interfaceClass, deviceID16, &buf[0], buflen, flags); ret == CR_SUCCESS { + break + } else if ret != CR_BUFFER_SMALL { + return nil, ret + } + } + var interfaces []string + for i := 0; i < len(buf); { + j := i + wcslen(buf[i:]) + if i < j { + interfaces = append(interfaces, UTF16ToString(buf[i:j])) + } + i = j + 1 + } + if interfaces == nil { + return nil, ERROR_NO_SUCH_DEVICE_INTERFACE + } + return interfaces, nil +} + +//sys cm_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) (ret CONFIGRET) = CfgMgr32.CM_Get_DevNode_Status + +func CM_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) error { + ret := cm_Get_DevNode_Status(status, problemNumber, devInst, flags) + if ret == CR_SUCCESS { + return nil + } + return ret +} diff --git a/vendor/golang.org/x/sys/windows/setupapierrors_windows.go b/vendor/golang.org/x/sys/windows/setupapierrors_windows.go deleted file mode 100644 index 1681810e04..0000000000 --- a/vendor/golang.org/x/sys/windows/setupapierrors_windows.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package windows - -import "syscall" - -const ( - ERROR_EXPECTED_SECTION_NAME syscall.Errno = 0x20000000 | 0xC0000000 | 0 - ERROR_BAD_SECTION_NAME_LINE syscall.Errno = 0x20000000 | 0xC0000000 | 1 - ERROR_SECTION_NAME_TOO_LONG syscall.Errno = 0x20000000 | 0xC0000000 | 2 - ERROR_GENERAL_SYNTAX syscall.Errno = 0x20000000 | 0xC0000000 | 3 - ERROR_WRONG_INF_STYLE syscall.Errno = 0x20000000 | 0xC0000000 | 0x100 - ERROR_SECTION_NOT_FOUND syscall.Errno = 0x20000000 | 0xC0000000 | 0x101 - ERROR_LINE_NOT_FOUND syscall.Errno = 0x20000000 | 0xC0000000 | 0x102 - ERROR_NO_BACKUP syscall.Errno = 0x20000000 | 0xC0000000 | 0x103 - ERROR_NO_ASSOCIATED_CLASS syscall.Errno = 0x20000000 | 0xC0000000 | 0x200 - ERROR_CLASS_MISMATCH syscall.Errno = 0x20000000 | 0xC0000000 | 0x201 - ERROR_DUPLICATE_FOUND syscall.Errno = 0x20000000 | 0xC0000000 | 0x202 - ERROR_NO_DRIVER_SELECTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x203 - ERROR_KEY_DOES_NOT_EXIST syscall.Errno = 0x20000000 | 0xC0000000 | 0x204 - ERROR_INVALID_DEVINST_NAME syscall.Errno = 0x20000000 | 0xC0000000 | 0x205 - ERROR_INVALID_CLASS syscall.Errno = 0x20000000 | 0xC0000000 | 0x206 - ERROR_DEVINST_ALREADY_EXISTS syscall.Errno = 0x20000000 | 0xC0000000 | 0x207 - ERROR_DEVINFO_NOT_REGISTERED syscall.Errno = 0x20000000 | 0xC0000000 | 0x208 - ERROR_INVALID_REG_PROPERTY syscall.Errno = 0x20000000 | 0xC0000000 | 0x209 - ERROR_NO_INF syscall.Errno = 0x20000000 | 0xC0000000 | 0x20A - ERROR_NO_SUCH_DEVINST syscall.Errno = 0x20000000 | 0xC0000000 | 0x20B - ERROR_CANT_LOAD_CLASS_ICON syscall.Errno = 0x20000000 | 0xC0000000 | 0x20C - ERROR_INVALID_CLASS_INSTALLER syscall.Errno = 0x20000000 | 0xC0000000 | 0x20D - ERROR_DI_DO_DEFAULT syscall.Errno = 0x20000000 | 0xC0000000 | 0x20E - ERROR_DI_NOFILECOPY syscall.Errno = 0x20000000 | 0xC0000000 | 0x20F - ERROR_INVALID_HWPROFILE syscall.Errno = 0x20000000 | 0xC0000000 | 0x210 - ERROR_NO_DEVICE_SELECTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x211 - ERROR_DEVINFO_LIST_LOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x212 - ERROR_DEVINFO_DATA_LOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x213 - ERROR_DI_BAD_PATH syscall.Errno = 0x20000000 | 0xC0000000 | 0x214 - ERROR_NO_CLASSINSTALL_PARAMS syscall.Errno = 0x20000000 | 0xC0000000 | 0x215 - ERROR_FILEQUEUE_LOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x216 - ERROR_BAD_SERVICE_INSTALLSECT syscall.Errno = 0x20000000 | 0xC0000000 | 0x217 - ERROR_NO_CLASS_DRIVER_LIST syscall.Errno = 0x20000000 | 0xC0000000 | 0x218 - ERROR_NO_ASSOCIATED_SERVICE syscall.Errno = 0x20000000 | 0xC0000000 | 0x219 - ERROR_NO_DEFAULT_DEVICE_INTERFACE syscall.Errno = 0x20000000 | 0xC0000000 | 0x21A - ERROR_DEVICE_INTERFACE_ACTIVE syscall.Errno = 0x20000000 | 0xC0000000 | 0x21B - ERROR_DEVICE_INTERFACE_REMOVED syscall.Errno = 0x20000000 | 0xC0000000 | 0x21C - ERROR_BAD_INTERFACE_INSTALLSECT syscall.Errno = 0x20000000 | 0xC0000000 | 0x21D - ERROR_NO_SUCH_INTERFACE_CLASS syscall.Errno = 0x20000000 | 0xC0000000 | 0x21E - ERROR_INVALID_REFERENCE_STRING syscall.Errno = 0x20000000 | 0xC0000000 | 0x21F - ERROR_INVALID_MACHINENAME syscall.Errno = 0x20000000 | 0xC0000000 | 0x220 - ERROR_REMOTE_COMM_FAILURE syscall.Errno = 0x20000000 | 0xC0000000 | 0x221 - ERROR_MACHINE_UNAVAILABLE syscall.Errno = 0x20000000 | 0xC0000000 | 0x222 - ERROR_NO_CONFIGMGR_SERVICES syscall.Errno = 0x20000000 | 0xC0000000 | 0x223 - ERROR_INVALID_PROPPAGE_PROVIDER syscall.Errno = 0x20000000 | 0xC0000000 | 0x224 - ERROR_NO_SUCH_DEVICE_INTERFACE syscall.Errno = 0x20000000 | 0xC0000000 | 0x225 - ERROR_DI_POSTPROCESSING_REQUIRED 
syscall.Errno = 0x20000000 | 0xC0000000 | 0x226 - ERROR_INVALID_COINSTALLER syscall.Errno = 0x20000000 | 0xC0000000 | 0x227 - ERROR_NO_COMPAT_DRIVERS syscall.Errno = 0x20000000 | 0xC0000000 | 0x228 - ERROR_NO_DEVICE_ICON syscall.Errno = 0x20000000 | 0xC0000000 | 0x229 - ERROR_INVALID_INF_LOGCONFIG syscall.Errno = 0x20000000 | 0xC0000000 | 0x22A - ERROR_DI_DONT_INSTALL syscall.Errno = 0x20000000 | 0xC0000000 | 0x22B - ERROR_INVALID_FILTER_DRIVER syscall.Errno = 0x20000000 | 0xC0000000 | 0x22C - ERROR_NON_WINDOWS_NT_DRIVER syscall.Errno = 0x20000000 | 0xC0000000 | 0x22D - ERROR_NON_WINDOWS_DRIVER syscall.Errno = 0x20000000 | 0xC0000000 | 0x22E - ERROR_NO_CATALOG_FOR_OEM_INF syscall.Errno = 0x20000000 | 0xC0000000 | 0x22F - ERROR_DEVINSTALL_QUEUE_NONNATIVE syscall.Errno = 0x20000000 | 0xC0000000 | 0x230 - ERROR_NOT_DISABLEABLE syscall.Errno = 0x20000000 | 0xC0000000 | 0x231 - ERROR_CANT_REMOVE_DEVINST syscall.Errno = 0x20000000 | 0xC0000000 | 0x232 - ERROR_INVALID_TARGET syscall.Errno = 0x20000000 | 0xC0000000 | 0x233 - ERROR_DRIVER_NONNATIVE syscall.Errno = 0x20000000 | 0xC0000000 | 0x234 - ERROR_IN_WOW64 syscall.Errno = 0x20000000 | 0xC0000000 | 0x235 - ERROR_SET_SYSTEM_RESTORE_POINT syscall.Errno = 0x20000000 | 0xC0000000 | 0x236 - ERROR_SCE_DISABLED syscall.Errno = 0x20000000 | 0xC0000000 | 0x238 - ERROR_UNKNOWN_EXCEPTION syscall.Errno = 0x20000000 | 0xC0000000 | 0x239 - ERROR_PNP_REGISTRY_ERROR syscall.Errno = 0x20000000 | 0xC0000000 | 0x23A - ERROR_REMOTE_REQUEST_UNSUPPORTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x23B - ERROR_NOT_AN_INSTALLED_OEM_INF syscall.Errno = 0x20000000 | 0xC0000000 | 0x23C - ERROR_INF_IN_USE_BY_DEVICES syscall.Errno = 0x20000000 | 0xC0000000 | 0x23D - ERROR_DI_FUNCTION_OBSOLETE syscall.Errno = 0x20000000 | 0xC0000000 | 0x23E - ERROR_NO_AUTHENTICODE_CATALOG syscall.Errno = 0x20000000 | 0xC0000000 | 0x23F - ERROR_AUTHENTICODE_DISALLOWED syscall.Errno = 0x20000000 | 0xC0000000 | 0x240 - ERROR_AUTHENTICODE_TRUSTED_PUBLISHER syscall.Errno = 0x20000000 | 0xC0000000 | 0x241 - ERROR_AUTHENTICODE_TRUST_NOT_ESTABLISHED syscall.Errno = 0x20000000 | 0xC0000000 | 0x242 - ERROR_AUTHENTICODE_PUBLISHER_NOT_TRUSTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x243 - ERROR_SIGNATURE_OSATTRIBUTE_MISMATCH syscall.Errno = 0x20000000 | 0xC0000000 | 0x244 - ERROR_ONLY_VALIDATE_VIA_AUTHENTICODE syscall.Errno = 0x20000000 | 0xC0000000 | 0x245 - ERROR_DEVICE_INSTALLER_NOT_READY syscall.Errno = 0x20000000 | 0xC0000000 | 0x246 - ERROR_DRIVER_STORE_ADD_FAILED syscall.Errno = 0x20000000 | 0xC0000000 | 0x247 - ERROR_DEVICE_INSTALL_BLOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x248 - ERROR_DRIVER_INSTALL_BLOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x249 - ERROR_WRONG_INF_TYPE syscall.Errno = 0x20000000 | 0xC0000000 | 0x24A - ERROR_FILE_HASH_NOT_IN_CATALOG syscall.Errno = 0x20000000 | 0xC0000000 | 0x24B - ERROR_DRIVER_STORE_DELETE_FAILED syscall.Errno = 0x20000000 | 0xC0000000 | 0x24C - ERROR_UNRECOVERABLE_STACK_OVERFLOW syscall.Errno = 0x20000000 | 0xC0000000 | 0x300 - EXCEPTION_SPAPI_UNRECOVERABLE_STACK_OVERFLOW syscall.Errno = ERROR_UNRECOVERABLE_STACK_OVERFLOW - ERROR_NO_DEFAULT_INTERFACE_DEVICE syscall.Errno = ERROR_NO_DEFAULT_DEVICE_INTERFACE - ERROR_INTERFACE_DEVICE_ACTIVE syscall.Errno = ERROR_DEVICE_INTERFACE_ACTIVE - ERROR_INTERFACE_DEVICE_REMOVED syscall.Errno = ERROR_DEVICE_INTERFACE_REMOVED - ERROR_NO_SUCH_INTERFACE_DEVICE syscall.Errno = ERROR_NO_SUCH_DEVICE_INTERFACE -) diff --git a/vendor/golang.org/x/sys/windows/str.go b/vendor/golang.org/x/sys/windows/str.go index 
917cc2aae4..4fc01434e4 100644 --- a/vendor/golang.org/x/sys/windows/str.go +++ b/vendor/golang.org/x/sys/windows/str.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build windows // +build windows package windows diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go index 6122f557a0..72074d582f 100644 --- a/vendor/golang.org/x/sys/windows/syscall.go +++ b/vendor/golang.org/x/sys/windows/syscall.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build windows // +build windows // Package windows contains an interface to the low-level operating system diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 1215b2ae20..7702bbb8d8 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -248,6 +248,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys FreeEnvironmentStrings(envs *uint16) (err error) = kernel32.FreeEnvironmentStringsW //sys GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) = kernel32.GetEnvironmentVariableW //sys SetEnvironmentVariable(name *uint16, value *uint16) (err error) = kernel32.SetEnvironmentVariableW +//sys ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW //sys CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock //sys DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock //sys getTickCount64() (ms uint64) = kernel32.GetTickCount64 @@ -274,6 +275,11 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) = kernel32.VirtualAlloc //sys VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) = kernel32.VirtualFree //sys VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) = kernel32.VirtualProtect +//sys VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect uint32, oldProtect *uint32) (err error) = kernel32.VirtualProtectEx +//sys VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) = kernel32.VirtualQuery +//sys VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) = kernel32.VirtualQueryEx +//sys ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesRead *uintptr) (err error) = kernel32.ReadProcessMemory +//sys WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesWritten *uintptr) (err error) = kernel32.WriteProcessMemory //sys TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) = mswsock.TransmitFile //sys ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) = kernel32.ReadDirectoryChangesW //sys FindFirstChangeNotification(path string, watchSubtree bool, notifyFilter uint32) (handle Handle, err error) [failretval==InvalidHandle] = 
kernel32.FindFirstChangeNotificationW @@ -396,8 +402,18 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys LoadResource(module Handle, resInfo Handle) (resData Handle, err error) = kernel32.LoadResource //sys LockResource(resData Handle) (addr uintptr, err error) = kernel32.LockResource +// Version APIs +//sys GetFileVersionInfoSize(filename string, zeroHandle *Handle) (bufSize uint32, err error) = version.GetFileVersionInfoSizeW +//sys GetFileVersionInfo(filename string, handle uint32, bufSize uint32, buffer unsafe.Pointer) (err error) = version.GetFileVersionInfoW +//sys VerQueryValue(block unsafe.Pointer, subBlock string, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) = version.VerQueryValueW + // Process Status API (PSAPI) //sys EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) = psapi.EnumProcesses +//sys EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uint32) (err error) = psapi.EnumProcessModules +//sys EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *uint32, filterFlag uint32) (err error) = psapi.EnumProcessModulesEx +//sys GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) = psapi.GetModuleInformation +//sys GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size uint32) (err error) = psapi.GetModuleFileNameExW +//sys GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uint32) (err error) = psapi.GetModuleBaseNameW // NT Native APIs //sys rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) = ntdll.RtlNtStatusToDosErrorNoTeb @@ -408,11 +424,16 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys RtlInitString(destinationString *NTString, sourceString *byte) = ntdll.RtlInitString //sys NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) = ntdll.NtCreateFile //sys NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (ntstatus error) = ntdll.NtCreateNamedPipeFile +//sys NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, inBufferLen uint32, class uint32) (ntstatus error) = ntdll.NtSetInformationFile //sys RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) = ntdll.RtlDosPathNameToNtPathName_U_WithStatus //sys RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) = ntdll.RtlDosPathNameToRelativeNtPathName_U_WithStatus //sys RtlDefaultNpAcl(acl **ACL) (ntstatus error) = ntdll.RtlDefaultNpAcl //sys NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32, retLen *uint32) (ntstatus error) = ntdll.NtQueryInformationProcess //sys NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) = ntdll.NtSetInformationProcess +//sys NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32, retLen *uint32) (ntstatus error) = ntdll.NtQuerySystemInformation +//sys 
NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32) (ntstatus error) = ntdll.NtSetSystemInformation +//sys RtlAddFunctionTable(functionTable *RUNTIME_FUNCTION, entryCount uint32, baseAddress uintptr) (ret bool) = ntdll.RtlAddFunctionTable +//sys RtlDeleteFunctionTable(functionTable *RUNTIME_FUNCTION) (ret bool) = ntdll.RtlDeleteFunctionTable // syscall interface implementation for other packages @@ -873,9 +894,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, int32, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil } @@ -895,9 +914,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, int32, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil } @@ -970,9 +987,7 @@ func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) { sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil case AF_INET6: @@ -981,9 +996,7 @@ func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil } return nil, syscall.EAFNOSUPPORT diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 17f03312df..286dd1eab9 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -66,9 +66,21 @@ var signals = [...]string{ } const ( - FILE_LIST_DIRECTORY = 0x00000001 - FILE_APPEND_DATA = 0x00000004 + FILE_READ_DATA = 0x00000001 + FILE_READ_ATTRIBUTES = 0x00000080 + FILE_READ_EA = 0x00000008 + FILE_WRITE_DATA = 0x00000002 FILE_WRITE_ATTRIBUTES = 0x00000100 + FILE_WRITE_EA = 0x00000010 + FILE_APPEND_DATA = 0x00000004 + FILE_EXECUTE = 0x00000020 + + FILE_GENERIC_READ = STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE + FILE_GENERIC_WRITE = STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE + FILE_GENERIC_EXECUTE = STANDARD_RIGHTS_EXECUTE | FILE_READ_ATTRIBUTES | FILE_EXECUTE | SYNCHRONIZE + + FILE_LIST_DIRECTORY = 0x00000001 + FILE_TRAVERSE = 0x00000020 FILE_SHARE_READ = 0x00000001 FILE_SHARE_WRITE = 0x00000002 @@ -242,6 +254,14 @@ const ( TH32CS_INHERIT = 0x80000000 ) +const ( + // flags for EnumProcessModulesEx + LIST_MODULES_32BIT = 0x01 + LIST_MODULES_64BIT = 0x02 + LIST_MODULES_ALL = 0x03 + LIST_MODULES_DEFAULT = 0x00 +) + const ( // filters for ReadDirectoryChangesW and FindFirstChangeNotificationW FILE_NOTIFY_CHANGE_FILE_NAME = 0x001 @@ -1781,7 +1801,53 @@ type reparseDataBuffer struct { } const ( - FSCTL_GET_REPARSE_POINT = 0x900A8 + FSCTL_CREATE_OR_GET_OBJECT_ID = 0x0900C0 + FSCTL_DELETE_OBJECT_ID = 0x0900A0 + FSCTL_DELETE_REPARSE_POINT = 0x0900AC + FSCTL_DUPLICATE_EXTENTS_TO_FILE = 0x098344 + FSCTL_DUPLICATE_EXTENTS_TO_FILE_EX = 0x0983E8 + FSCTL_FILESYSTEM_GET_STATISTICS = 0x090060 + FSCTL_FILE_LEVEL_TRIM = 
0x098208 + FSCTL_FIND_FILES_BY_SID = 0x09008F + FSCTL_GET_COMPRESSION = 0x09003C + FSCTL_GET_INTEGRITY_INFORMATION = 0x09027C + FSCTL_GET_NTFS_VOLUME_DATA = 0x090064 + FSCTL_GET_REFS_VOLUME_DATA = 0x0902D8 + FSCTL_GET_OBJECT_ID = 0x09009C + FSCTL_GET_REPARSE_POINT = 0x0900A8 + FSCTL_GET_RETRIEVAL_POINTER_COUNT = 0x09042B + FSCTL_GET_RETRIEVAL_POINTERS = 0x090073 + FSCTL_GET_RETRIEVAL_POINTERS_AND_REFCOUNT = 0x0903D3 + FSCTL_IS_PATHNAME_VALID = 0x09002C + FSCTL_LMR_SET_LINK_TRACKING_INFORMATION = 0x1400EC + FSCTL_MARK_HANDLE = 0x0900FC + FSCTL_OFFLOAD_READ = 0x094264 + FSCTL_OFFLOAD_WRITE = 0x098268 + FSCTL_PIPE_PEEK = 0x11400C + FSCTL_PIPE_TRANSCEIVE = 0x11C017 + FSCTL_PIPE_WAIT = 0x110018 + FSCTL_QUERY_ALLOCATED_RANGES = 0x0940CF + FSCTL_QUERY_FAT_BPB = 0x090058 + FSCTL_QUERY_FILE_REGIONS = 0x090284 + FSCTL_QUERY_ON_DISK_VOLUME_INFO = 0x09013C + FSCTL_QUERY_SPARING_INFO = 0x090138 + FSCTL_READ_FILE_USN_DATA = 0x0900EB + FSCTL_RECALL_FILE = 0x090117 + FSCTL_REFS_STREAM_SNAPSHOT_MANAGEMENT = 0x090440 + FSCTL_SET_COMPRESSION = 0x09C040 + FSCTL_SET_DEFECT_MANAGEMENT = 0x098134 + FSCTL_SET_ENCRYPTION = 0x0900D7 + FSCTL_SET_INTEGRITY_INFORMATION = 0x09C280 + FSCTL_SET_INTEGRITY_INFORMATION_EX = 0x090380 + FSCTL_SET_OBJECT_ID = 0x090098 + FSCTL_SET_OBJECT_ID_EXTENDED = 0x0900BC + FSCTL_SET_REPARSE_POINT = 0x0900A4 + FSCTL_SET_SPARSE = 0x0900C4 + FSCTL_SET_ZERO_DATA = 0x0980C8 + FSCTL_SET_ZERO_ON_DEALLOCATION = 0x090194 + FSCTL_SIS_COPYFILE = 0x090100 + FSCTL_WRITE_USN_CLOSE_RECORD = 0x0900EF + MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 16 * 1024 IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003 IO_REPARSE_TAG_SYMLINK = 0xA000000C @@ -2300,6 +2366,12 @@ type LIST_ENTRY struct { Blink *LIST_ENTRY } +type RUNTIME_FUNCTION struct { + BeginAddress uint32 + EndAddress uint32 + UnwindData uint32 +} + type LDR_DATA_TABLE_ENTRY struct { reserved1 [2]uintptr InMemoryOrderLinks LIST_ENTRY @@ -2490,6 +2562,60 @@ const ( FILE_PIPE_SERVER_END = 0x00000001 ) +const ( + // FileInformationClass for NtSetInformationFile + FileBasicInformation = 4 + FileRenameInformation = 10 + FileDispositionInformation = 13 + FilePositionInformation = 14 + FileEndOfFileInformation = 20 + FileValidDataLengthInformation = 39 + FileShortNameInformation = 40 + FileIoPriorityHintInformation = 43 + FileReplaceCompletionInformation = 61 + FileDispositionInformationEx = 64 + FileCaseSensitiveInformation = 71 + FileLinkInformation = 72 + FileCaseSensitiveInformationForceAccessCheck = 75 + FileKnownFolderInformation = 76 + + // Flags for FILE_RENAME_INFORMATION + FILE_RENAME_REPLACE_IF_EXISTS = 0x00000001 + FILE_RENAME_POSIX_SEMANTICS = 0x00000002 + FILE_RENAME_SUPPRESS_PIN_STATE_INHERITANCE = 0x00000004 + FILE_RENAME_SUPPRESS_STORAGE_RESERVE_INHERITANCE = 0x00000008 + FILE_RENAME_NO_INCREASE_AVAILABLE_SPACE = 0x00000010 + FILE_RENAME_NO_DECREASE_AVAILABLE_SPACE = 0x00000020 + FILE_RENAME_PRESERVE_AVAILABLE_SPACE = 0x00000030 + FILE_RENAME_IGNORE_READONLY_ATTRIBUTE = 0x00000040 + FILE_RENAME_FORCE_RESIZE_TARGET_SR = 0x00000080 + FILE_RENAME_FORCE_RESIZE_SOURCE_SR = 0x00000100 + FILE_RENAME_FORCE_RESIZE_SR = 0x00000180 + + // Flags for FILE_DISPOSITION_INFORMATION_EX + FILE_DISPOSITION_DO_NOT_DELETE = 0x00000000 + FILE_DISPOSITION_DELETE = 0x00000001 + FILE_DISPOSITION_POSIX_SEMANTICS = 0x00000002 + FILE_DISPOSITION_FORCE_IMAGE_SECTION_CHECK = 0x00000004 + FILE_DISPOSITION_ON_CLOSE = 0x00000008 + FILE_DISPOSITION_IGNORE_READONLY_ATTRIBUTE = 0x00000010 + + // Flags for FILE_CASE_SENSITIVE_INFORMATION + FILE_CS_FLAG_CASE_SENSITIVE_DIR = 0x00000001 + + // 
Flags for FILE_LINK_INFORMATION + FILE_LINK_REPLACE_IF_EXISTS = 0x00000001 + FILE_LINK_POSIX_SEMANTICS = 0x00000002 + FILE_LINK_SUPPRESS_STORAGE_RESERVE_INHERITANCE = 0x00000008 + FILE_LINK_NO_INCREASE_AVAILABLE_SPACE = 0x00000010 + FILE_LINK_NO_DECREASE_AVAILABLE_SPACE = 0x00000020 + FILE_LINK_PRESERVE_AVAILABLE_SPACE = 0x00000030 + FILE_LINK_IGNORE_READONLY_ATTRIBUTE = 0x00000040 + FILE_LINK_FORCE_RESIZE_TARGET_SR = 0x00000080 + FILE_LINK_FORCE_RESIZE_SOURCE_SR = 0x00000100 + FILE_LINK_FORCE_RESIZE_SR = 0x00000180 +) + // ProcessInformationClasses for NtQueryInformationProcess and NtSetInformationProcess. const ( ProcessBasicInformation = iota @@ -2606,6 +2732,203 @@ type PROCESS_BASIC_INFORMATION struct { InheritedFromUniqueProcessId uintptr } +// SystemInformationClasses for NtQuerySystemInformation and NtSetSystemInformation +const ( + SystemBasicInformation = iota + SystemProcessorInformation + SystemPerformanceInformation + SystemTimeOfDayInformation + SystemPathInformation + SystemProcessInformation + SystemCallCountInformation + SystemDeviceInformation + SystemProcessorPerformanceInformation + SystemFlagsInformation + SystemCallTimeInformation + SystemModuleInformation + SystemLocksInformation + SystemStackTraceInformation + SystemPagedPoolInformation + SystemNonPagedPoolInformation + SystemHandleInformation + SystemObjectInformation + SystemPageFileInformation + SystemVdmInstemulInformation + SystemVdmBopInformation + SystemFileCacheInformation + SystemPoolTagInformation + SystemInterruptInformation + SystemDpcBehaviorInformation + SystemFullMemoryInformation + SystemLoadGdiDriverInformation + SystemUnloadGdiDriverInformation + SystemTimeAdjustmentInformation + SystemSummaryMemoryInformation + SystemMirrorMemoryInformation + SystemPerformanceTraceInformation + systemObsolete0 + SystemExceptionInformation + SystemCrashDumpStateInformation + SystemKernelDebuggerInformation + SystemContextSwitchInformation + SystemRegistryQuotaInformation + SystemExtendServiceTableInformation + SystemPrioritySeperation + SystemVerifierAddDriverInformation + SystemVerifierRemoveDriverInformation + SystemProcessorIdleInformation + SystemLegacyDriverInformation + SystemCurrentTimeZoneInformation + SystemLookasideInformation + SystemTimeSlipNotification + SystemSessionCreate + SystemSessionDetach + SystemSessionInformation + SystemRangeStartInformation + SystemVerifierInformation + SystemVerifierThunkExtend + SystemSessionProcessInformation + SystemLoadGdiDriverInSystemSpace + SystemNumaProcessorMap + SystemPrefetcherInformation + SystemExtendedProcessInformation + SystemRecommendedSharedDataAlignment + SystemComPlusPackage + SystemNumaAvailableMemory + SystemProcessorPowerInformation + SystemEmulationBasicInformation + SystemEmulationProcessorInformation + SystemExtendedHandleInformation + SystemLostDelayedWriteInformation + SystemBigPoolInformation + SystemSessionPoolTagInformation + SystemSessionMappedViewInformation + SystemHotpatchInformation + SystemObjectSecurityMode + SystemWatchdogTimerHandler + SystemWatchdogTimerInformation + SystemLogicalProcessorInformation + SystemWow64SharedInformationObsolete + SystemRegisterFirmwareTableInformationHandler + SystemFirmwareTableInformation + SystemModuleInformationEx + SystemVerifierTriageInformation + SystemSuperfetchInformation + SystemMemoryListInformation + SystemFileCacheInformationEx + SystemThreadPriorityClientIdInformation + SystemProcessorIdleCycleTimeInformation + SystemVerifierCancellationInformation + SystemProcessorPowerInformationEx + 
SystemRefTraceInformation + SystemSpecialPoolInformation + SystemProcessIdInformation + SystemErrorPortInformation + SystemBootEnvironmentInformation + SystemHypervisorInformation + SystemVerifierInformationEx + SystemTimeZoneInformation + SystemImageFileExecutionOptionsInformation + SystemCoverageInformation + SystemPrefetchPatchInformation + SystemVerifierFaultsInformation + SystemSystemPartitionInformation + SystemSystemDiskInformation + SystemProcessorPerformanceDistribution + SystemNumaProximityNodeInformation + SystemDynamicTimeZoneInformation + SystemCodeIntegrityInformation + SystemProcessorMicrocodeUpdateInformation + SystemProcessorBrandString + SystemVirtualAddressInformation + SystemLogicalProcessorAndGroupInformation + SystemProcessorCycleTimeInformation + SystemStoreInformation + SystemRegistryAppendString + SystemAitSamplingValue + SystemVhdBootInformation + SystemCpuQuotaInformation + SystemNativeBasicInformation + systemSpare1 + SystemLowPriorityIoInformation + SystemTpmBootEntropyInformation + SystemVerifierCountersInformation + SystemPagedPoolInformationEx + SystemSystemPtesInformationEx + SystemNodeDistanceInformation + SystemAcpiAuditInformation + SystemBasicPerformanceInformation + SystemQueryPerformanceCounterInformation + SystemSessionBigPoolInformation + SystemBootGraphicsInformation + SystemScrubPhysicalMemoryInformation + SystemBadPageInformation + SystemProcessorProfileControlArea + SystemCombinePhysicalMemoryInformation + SystemEntropyInterruptTimingCallback + SystemConsoleInformation + SystemPlatformBinaryInformation + SystemThrottleNotificationInformation + SystemHypervisorProcessorCountInformation + SystemDeviceDataInformation + SystemDeviceDataEnumerationInformation + SystemMemoryTopologyInformation + SystemMemoryChannelInformation + SystemBootLogoInformation + SystemProcessorPerformanceInformationEx + systemSpare0 + SystemSecureBootPolicyInformation + SystemPageFileInformationEx + SystemSecureBootInformation + SystemEntropyInterruptTimingRawInformation + SystemPortableWorkspaceEfiLauncherInformation + SystemFullProcessInformation + SystemKernelDebuggerInformationEx + SystemBootMetadataInformation + SystemSoftRebootInformation + SystemElamCertificateInformation + SystemOfflineDumpConfigInformation + SystemProcessorFeaturesInformation + SystemRegistryReconciliationInformation + SystemEdidInformation + SystemManufacturingInformation + SystemEnergyEstimationConfigInformation + SystemHypervisorDetailInformation + SystemProcessorCycleStatsInformation + SystemVmGenerationCountInformation + SystemTrustedPlatformModuleInformation + SystemKernelDebuggerFlags + SystemCodeIntegrityPolicyInformation + SystemIsolatedUserModeInformation + SystemHardwareSecurityTestInterfaceResultsInformation + SystemSingleModuleInformation + SystemAllowedCpuSetsInformation + SystemDmaProtectionInformation + SystemInterruptCpuSetsInformation + SystemSecureBootPolicyFullInformation + SystemCodeIntegrityPolicyFullInformation + SystemAffinitizedInterruptProcessorInformation + SystemRootSiloInformation +) + +type RTL_PROCESS_MODULE_INFORMATION struct { + Section Handle + MappedBase uintptr + ImageBase uintptr + ImageSize uint32 + Flags uint32 + LoadOrderIndex uint16 + InitOrderIndex uint16 + LoadCount uint16 + OffsetToFileName uint16 + FullPathName [256]byte +} + +type RTL_PROCESS_MODULES struct { + NumberOfModules uint32 + Modules [1]RTL_PROCESS_MODULE_INFORMATION +} + // Constants for LocalAlloc flags. 
const ( LMEM_FIXED = 0x0 @@ -2700,6 +3023,22 @@ var ( RT_MANIFEST ResourceID = 24 ) +type VS_FIXEDFILEINFO struct { + Signature uint32 + StrucVersion uint32 + FileVersionMS uint32 + FileVersionLS uint32 + ProductVersionMS uint32 + ProductVersionLS uint32 + FileFlagsMask uint32 + FileFlags uint32 + FileOS uint32 + FileType uint32 + FileSubtype uint32 + FileDateMS uint32 + FileDateLS uint32 +} + type COAUTHIDENTITY struct { User *uint16 UserLength uint32 @@ -2773,3 +3112,9 @@ const ( // Flag for QueryFullProcessImageName. const PROCESS_NAME_NATIVE = 1 + +type ModuleInfo struct { + BaseOfDll uintptr + SizeOfImage uint32 + EntryPoint uintptr +} diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 2083ec376e..bf0dd8fca8 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -36,6 +36,7 @@ func errnoErr(e syscall.Errno) error { } var ( + modCfgMgr32 = NewLazySystemDLL("CfgMgr32.dll") modadvapi32 = NewLazySystemDLL("advapi32.dll") modcrypt32 = NewLazySystemDLL("crypt32.dll") moddnsapi = NewLazySystemDLL("dnsapi.dll") @@ -48,13 +49,19 @@ var ( modpsapi = NewLazySystemDLL("psapi.dll") modsechost = NewLazySystemDLL("sechost.dll") modsecur32 = NewLazySystemDLL("secur32.dll") + modsetupapi = NewLazySystemDLL("setupapi.dll") modshell32 = NewLazySystemDLL("shell32.dll") moduser32 = NewLazySystemDLL("user32.dll") moduserenv = NewLazySystemDLL("userenv.dll") + modversion = NewLazySystemDLL("version.dll") modwintrust = NewLazySystemDLL("wintrust.dll") modws2_32 = NewLazySystemDLL("ws2_32.dll") modwtsapi32 = NewLazySystemDLL("wtsapi32.dll") + procCM_Get_DevNode_Status = modCfgMgr32.NewProc("CM_Get_DevNode_Status") + procCM_Get_Device_Interface_ListW = modCfgMgr32.NewProc("CM_Get_Device_Interface_ListW") + procCM_Get_Device_Interface_List_SizeW = modCfgMgr32.NewProc("CM_Get_Device_Interface_List_SizeW") + procCM_MapCrToWin32Err = modCfgMgr32.NewProc("CM_MapCrToWin32Err") procAdjustTokenGroups = modadvapi32.NewProc("AdjustTokenGroups") procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") procAllocateAndInitializeSid = modadvapi32.NewProc("AllocateAndInitializeSid") @@ -114,6 +121,7 @@ var ( procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") procQueryServiceConfig2W = modadvapi32.NewProc("QueryServiceConfig2W") procQueryServiceConfigW = modadvapi32.NewProc("QueryServiceConfigW") + procQueryServiceDynamicInformation = modadvapi32.NewProc("QueryServiceDynamicInformation") procQueryServiceLockStatusW = modadvapi32.NewProc("QueryServiceLockStatusW") procQueryServiceStatus = modadvapi32.NewProc("QueryServiceStatus") procQueryServiceStatusEx = modadvapi32.NewProc("QueryServiceStatusEx") @@ -124,6 +132,7 @@ var ( procRegQueryInfoKeyW = modadvapi32.NewProc("RegQueryInfoKeyW") procRegQueryValueExW = modadvapi32.NewProc("RegQueryValueExW") procRegisterEventSourceW = modadvapi32.NewProc("RegisterEventSourceW") + procRegisterServiceCtrlHandlerExW = modadvapi32.NewProc("RegisterServiceCtrlHandlerExW") procReportEventW = modadvapi32.NewProc("ReportEventW") procRevertToSelf = modadvapi32.NewProc("RevertToSelf") procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW") @@ -196,6 +205,7 @@ var ( procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") procExitProcess = modkernel32.NewProc("ExitProcess") + procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") 
procFindClose = modkernel32.NewProc("FindClose") procFindCloseChangeNotification = modkernel32.NewProc("FindCloseChangeNotification") procFindFirstChangeNotificationW = modkernel32.NewProc("FindFirstChangeNotificationW") @@ -303,6 +313,7 @@ var ( procReadConsoleW = modkernel32.NewProc("ReadConsoleW") procReadDirectoryChangesW = modkernel32.NewProc("ReadDirectoryChangesW") procReadFile = modkernel32.NewProc("ReadFile") + procReadProcessMemory = modkernel32.NewProc("ReadProcessMemory") procReleaseMutex = modkernel32.NewProc("ReleaseMutex") procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") procResetEvent = modkernel32.NewProc("ResetEvent") @@ -345,12 +356,16 @@ var ( procVirtualFree = modkernel32.NewProc("VirtualFree") procVirtualLock = modkernel32.NewProc("VirtualLock") procVirtualProtect = modkernel32.NewProc("VirtualProtect") + procVirtualProtectEx = modkernel32.NewProc("VirtualProtectEx") + procVirtualQuery = modkernel32.NewProc("VirtualQuery") + procVirtualQueryEx = modkernel32.NewProc("VirtualQueryEx") procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") procWTSGetActiveConsoleSessionId = modkernel32.NewProc("WTSGetActiveConsoleSessionId") procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") procWriteFile = modkernel32.NewProc("WriteFile") + procWriteProcessMemory = modkernel32.NewProc("WriteProcessMemory") procAcceptEx = modmswsock.NewProc("AcceptEx") procGetAcceptExSockaddrs = modmswsock.NewProc("GetAcceptExSockaddrs") procTransmitFile = modmswsock.NewProc("TransmitFile") @@ -360,8 +375,13 @@ var ( procNtCreateFile = modntdll.NewProc("NtCreateFile") procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") procNtQueryInformationProcess = modntdll.NewProc("NtQueryInformationProcess") + procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation") + procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile") procNtSetInformationProcess = modntdll.NewProc("NtSetInformationProcess") + procNtSetSystemInformation = modntdll.NewProc("NtSetSystemInformation") + procRtlAddFunctionTable = modntdll.NewProc("RtlAddFunctionTable") procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") + procRtlDeleteFunctionTable = modntdll.NewProc("RtlDeleteFunctionTable") procRtlDosPathNameToNtPathName_U_WithStatus = modntdll.NewProc("RtlDosPathNameToNtPathName_U_WithStatus") procRtlDosPathNameToRelativeNtPathName_U_WithStatus = modntdll.NewProc("RtlDosPathNameToRelativeNtPathName_U_WithStatus") procRtlGetCurrentPeb = modntdll.NewProc("RtlGetCurrentPeb") @@ -377,11 +397,44 @@ var ( procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") procCoUninitialize = modole32.NewProc("CoUninitialize") procStringFromGUID2 = modole32.NewProc("StringFromGUID2") + procEnumProcessModules = modpsapi.NewProc("EnumProcessModules") + procEnumProcessModulesEx = modpsapi.NewProc("EnumProcessModulesEx") procEnumProcesses = modpsapi.NewProc("EnumProcesses") + procGetModuleBaseNameW = modpsapi.NewProc("GetModuleBaseNameW") + procGetModuleFileNameExW = modpsapi.NewProc("GetModuleFileNameExW") + procGetModuleInformation = modpsapi.NewProc("GetModuleInformation") procSubscribeServiceChangeNotifications = modsechost.NewProc("SubscribeServiceChangeNotifications") procUnsubscribeServiceChangeNotifications = modsechost.NewProc("UnsubscribeServiceChangeNotifications") procGetUserNameExW = modsecur32.NewProc("GetUserNameExW") procTranslateNameW = 
modsecur32.NewProc("TranslateNameW") + procSetupDiBuildDriverInfoList = modsetupapi.NewProc("SetupDiBuildDriverInfoList") + procSetupDiCallClassInstaller = modsetupapi.NewProc("SetupDiCallClassInstaller") + procSetupDiCancelDriverInfoSearch = modsetupapi.NewProc("SetupDiCancelDriverInfoSearch") + procSetupDiClassGuidsFromNameExW = modsetupapi.NewProc("SetupDiClassGuidsFromNameExW") + procSetupDiClassNameFromGuidExW = modsetupapi.NewProc("SetupDiClassNameFromGuidExW") + procSetupDiCreateDeviceInfoListExW = modsetupapi.NewProc("SetupDiCreateDeviceInfoListExW") + procSetupDiCreateDeviceInfoW = modsetupapi.NewProc("SetupDiCreateDeviceInfoW") + procSetupDiDestroyDeviceInfoList = modsetupapi.NewProc("SetupDiDestroyDeviceInfoList") + procSetupDiDestroyDriverInfoList = modsetupapi.NewProc("SetupDiDestroyDriverInfoList") + procSetupDiEnumDeviceInfo = modsetupapi.NewProc("SetupDiEnumDeviceInfo") + procSetupDiEnumDriverInfoW = modsetupapi.NewProc("SetupDiEnumDriverInfoW") + procSetupDiGetClassDevsExW = modsetupapi.NewProc("SetupDiGetClassDevsExW") + procSetupDiGetClassInstallParamsW = modsetupapi.NewProc("SetupDiGetClassInstallParamsW") + procSetupDiGetDeviceInfoListDetailW = modsetupapi.NewProc("SetupDiGetDeviceInfoListDetailW") + procSetupDiGetDeviceInstallParamsW = modsetupapi.NewProc("SetupDiGetDeviceInstallParamsW") + procSetupDiGetDeviceInstanceIdW = modsetupapi.NewProc("SetupDiGetDeviceInstanceIdW") + procSetupDiGetDevicePropertyW = modsetupapi.NewProc("SetupDiGetDevicePropertyW") + procSetupDiGetDeviceRegistryPropertyW = modsetupapi.NewProc("SetupDiGetDeviceRegistryPropertyW") + procSetupDiGetDriverInfoDetailW = modsetupapi.NewProc("SetupDiGetDriverInfoDetailW") + procSetupDiGetSelectedDevice = modsetupapi.NewProc("SetupDiGetSelectedDevice") + procSetupDiGetSelectedDriverW = modsetupapi.NewProc("SetupDiGetSelectedDriverW") + procSetupDiOpenDevRegKey = modsetupapi.NewProc("SetupDiOpenDevRegKey") + procSetupDiSetClassInstallParamsW = modsetupapi.NewProc("SetupDiSetClassInstallParamsW") + procSetupDiSetDeviceInstallParamsW = modsetupapi.NewProc("SetupDiSetDeviceInstallParamsW") + procSetupDiSetDeviceRegistryPropertyW = modsetupapi.NewProc("SetupDiSetDeviceRegistryPropertyW") + procSetupDiSetSelectedDevice = modsetupapi.NewProc("SetupDiSetSelectedDevice") + procSetupDiSetSelectedDriverW = modsetupapi.NewProc("SetupDiSetSelectedDriverW") + procSetupUninstallOEMInfW = modsetupapi.NewProc("SetupUninstallOEMInfW") procCommandLineToArgvW = modshell32.NewProc("CommandLineToArgvW") procSHGetKnownFolderPath = modshell32.NewProc("SHGetKnownFolderPath") procShellExecuteW = modshell32.NewProc("ShellExecuteW") @@ -392,6 +445,9 @@ var ( procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") + procGetFileVersionInfoSizeW = modversion.NewProc("GetFileVersionInfoSizeW") + procGetFileVersionInfoW = modversion.NewProc("GetFileVersionInfoW") + procVerQueryValueW = modversion.NewProc("VerQueryValueW") procWinVerifyTrustEx = modwintrust.NewProc("WinVerifyTrustEx") procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") @@ -426,6 +482,30 @@ var ( procWTSQueryUserToken = modwtsapi32.NewProc("WTSQueryUserToken") ) +func cm_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) (ret CONFIGRET) { + r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_Status.Addr(), 4, 
uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags), 0, 0) + ret = CONFIGRET(r0) + return +} + +func cm_Get_Device_Interface_List(interfaceClass *GUID, deviceID *uint16, buffer *uint16, bufferLen uint32, flags uint32) (ret CONFIGRET) { + r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_ListW.Addr(), 5, uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags), 0) + ret = CONFIGRET(r0) + return +} + +func cm_Get_Device_Interface_List_Size(len *uint32, interfaceClass *GUID, deviceID *uint16, flags uint32) (ret CONFIGRET) { + r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_List_SizeW.Addr(), 4, uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags), 0, 0) + ret = CONFIGRET(r0) + return +} + +func cm_MapCrToWin32Err(configRet CONFIGRET, defaultWin32Error Errno) (ret Errno) { + r0, _, _ := syscall.Syscall(procCM_MapCrToWin32Err.Addr(), 2, uintptr(configRet), uintptr(defaultWin32Error), 0) + ret = Errno(r0) + return +} + func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) { var _p0 uint32 if resetToDefault { @@ -956,6 +1036,18 @@ func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, buf return } +func QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInfo unsafe.Pointer) (err error) { + err = procQueryServiceDynamicInformation.Find() + if err != nil { + return + } + r1, _, e1 := syscall.Syscall(procQueryServiceDynamicInformation.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) { r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) if r1 == 0 { @@ -1045,6 +1137,15 @@ func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Hand return } +func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, context uintptr) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procRegisterServiceCtrlHandlerExW.Addr(), 3, uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context)) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) { r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) if r1 == 0 { @@ -1674,6 +1775,15 @@ func ExitProcess(exitcode uint32) { return } +func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + func FindClose(handle Handle) (err error) { r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, 
uintptr(handle), 0, 0) if r1 == 0 { @@ -2631,6 +2741,14 @@ func ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) ( return } +func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesRead *uintptr) (err error) { + r1, _, e1 := syscall.Syscall6(procReadProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func ReleaseMutex(mutex Handle) (err error) { r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, uintptr(mutex), 0, 0) if r1 == 0 { @@ -2985,6 +3103,30 @@ func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect return } +func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect uint32, oldProtect *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procVirtualProtectEx.Addr(), 5, uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualQuery.Addr(), 3, uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall6(procVirtualQueryEx.Addr(), 4, uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func VirtualUnlock(addr uintptr, length uintptr) (err error) { r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0) if r1 == 0 { @@ -3041,6 +3183,14 @@ func WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) return } +func WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesWritten *uintptr) (err error) { + r1, _, e1 := syscall.Syscall6(procWriteProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) { r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0) if r1 == 0 { @@ -3110,6 +3260,22 @@ func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe return } +func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32, retLen *uint32) (ntstatus error) { + r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen)), 0, 0) + if r0 != 0 { + ntstatus = NTStatus(r0) + } + return +} + +func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, inBufferLen uint32, class uint32) (ntstatus error) { + r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), 
uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class), 0) + if r0 != 0 { + ntstatus = NTStatus(r0) + } + return +} + func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) { r0, _, _ := syscall.Syscall6(procNtSetInformationProcess.Addr(), 4, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), 0, 0) if r0 != 0 { @@ -3118,6 +3284,20 @@ func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.P return } +func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32) (ntstatus error) { + r0, _, _ := syscall.Syscall(procNtSetSystemInformation.Addr(), 3, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen)) + if r0 != 0 { + ntstatus = NTStatus(r0) + } + return +} + +func RtlAddFunctionTable(functionTable *RUNTIME_FUNCTION, entryCount uint32, baseAddress uintptr) (ret bool) { + r0, _, _ := syscall.Syscall(procRtlAddFunctionTable.Addr(), 3, uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress)) + ret = r0 != 0 + return +} + func RtlDefaultNpAcl(acl **ACL) (ntstatus error) { r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(acl)), 0, 0) if r0 != 0 { @@ -3126,6 +3306,12 @@ func RtlDefaultNpAcl(acl **ACL) (ntstatus error) { return } +func RtlDeleteFunctionTable(functionTable *RUNTIME_FUNCTION) (ret bool) { + r0, _, _ := syscall.Syscall(procRtlDeleteFunctionTable.Addr(), 1, uintptr(unsafe.Pointer(functionTable)), 0, 0) + ret = r0 != 0 + return +} + func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) { r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0) if r0 != 0 { @@ -3225,6 +3411,22 @@ func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) { return } +func EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procEnumProcessModules.Addr(), 4, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *uint32, filterFlag uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procEnumProcessModulesEx.Addr(), 5, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) { var _p0 *uint32 if len(processIds) > 0 { @@ -3237,6 +3439,30 @@ func EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) { return } +func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetModuleBaseNameW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetModuleFileNameExW.Addr(), 4, uintptr(process), uintptr(module), 
uintptr(unsafe.Pointer(filename)), uintptr(size), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetModuleInformation.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SubscribeServiceChangeNotifications(service Handle, eventType uint32, callback uintptr, callbackCtx uintptr, subscription *uintptr) (ret error) { ret = procSubscribeServiceChangeNotifications.Find() if ret != nil { @@ -3274,6 +3500,233 @@ func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint return } +func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiBuildDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiCallClassInstaller.Addr(), 3, uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiCancelDriverInfoSearch.Addr(), 1, uintptr(deviceInfoSet), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGuidListSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiClassGuidsFromNameExW.Addr(), 6, uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiClassNameFromGuidExW.Addr(), 6, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName *uint16, reserved uintptr) (handle DevInfo, err error) { + r0, _, e1 := syscall.Syscall6(procSetupDiCreateDeviceInfoListExW.Addr(), 4, uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0) + handle = DevInfo(r0) + if handle == DevInfo(InvalidHandle) { + err = errnoErr(e1) + } + return +} + +func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUID *GUID, DeviceDescription *uint16, hwndParent uintptr, CreationFlags DICD, deviceInfoData *DevInfoData) (err error) { + r1, _, e1 := syscall.Syscall9(procSetupDiCreateDeviceInfoW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), 
uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiDestroyDeviceInfoList.Addr(), 1, uintptr(deviceInfoSet), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiDestroyDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfoData *DevInfoData) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiEnumDeviceInfo.Addr(), 3, uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex uint32, driverInfoData *DrvInfoData) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiEnumDriverInfoW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintptr, Flags DIGCF, deviceInfoSet DevInfo, machineName *uint16, reserved uintptr) (handle DevInfo, err error) { + r0, _, e1 := syscall.Syscall9(procSetupDiGetClassDevsExW.Addr(), 7, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0) + handle = DevInfo(r0) + if handle == DevInfo(InvalidHandle) { + err = errnoErr(e1) + } + return +} + +func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiGetClassInstallParamsW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailData *DevInfoListDetailData) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInfoListDetailW.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, instanceId *uint16, instanceIdSize uint32, instanceIdRequiredSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiGetDeviceInstanceIdW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 
uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY, propertyType *DEVPROPTYPE, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procSetupDiGetDevicePropertyW.Addr(), 8, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyRegDataType *uint32, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procSetupDiGetDeviceRegistryPropertyW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData, driverInfoDetailData *DrvInfoDetailData, driverInfoDetailDataSize uint32, requiredSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiGetDriverInfoDetailW.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (key Handle, err error) { + r0, _, e1 := syscall.Syscall6(procSetupDiOpenDevRegKey.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired)) + key = Handle(r0) + if key == InvalidHandle { + err = errnoErr(e1) + } + return +} + +func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiSetClassInstallParamsW.Addr(), 4, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func 
SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiSetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffer *byte, propertyBufferSize uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiSetDeviceRegistryPropertyW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procSetupUninstallOEMInfW.Addr(), 3, uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) { r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) argv = (*[8192]*[8192]uint16)(unsafe.Pointer(r0)) @@ -3359,6 +3812,58 @@ func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) { return } +func GetFileVersionInfoSize(filename string, zeroHandle *Handle) (bufSize uint32, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(filename) + if err != nil { + return + } + return _GetFileVersionInfoSize(_p0, zeroHandle) +} + +func _GetFileVersionInfoSize(filename *uint16, zeroHandle *Handle) (bufSize uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetFileVersionInfoSizeW.Addr(), 2, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle)), 0) + bufSize = uint32(r0) + if bufSize == 0 { + err = errnoErr(e1) + } + return +} + +func GetFileVersionInfo(filename string, handle uint32, bufSize uint32, buffer unsafe.Pointer) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(filename) + if err != nil { + return + } + return _GetFileVersionInfo(_p0, handle, bufSize, buffer) +} + +func _GetFileVersionInfo(filename *uint16, handle uint32, bufSize uint32, buffer unsafe.Pointer) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileVersionInfoW.Addr(), 4, uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func VerQueryValue(block unsafe.Pointer, subBlock string, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(subBlock) + if err != nil { + return + 
} + return _VerQueryValue(block, _p0, pointerToBufferPointer, bufSize) +} + +func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procVerQueryValueW.Addr(), 4, uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) { r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data))) if r0 != 0 { diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go index 6b7052b892..a5c6d6d4fa 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -11,6 +11,8 @@ import ( "go/ast" "go/token" "sort" + + "golang.org/x/tools/internal/typeparams" ) // PathEnclosingInterval returns the node that encloses the source @@ -294,8 +296,8 @@ func childrenOf(n ast.Node) []ast.Node { case *ast.FieldList: children = append(children, - tok(n.Opening, len("(")), - tok(n.Closing, len(")"))) + tok(n.Opening, len("(")), // or len("[") + tok(n.Closing, len(")"))) // or len("]") case *ast.File: // TODO test: Doc @@ -322,6 +324,9 @@ func childrenOf(n ast.Node) []ast.Node { children = append(children, n.Recv) } children = append(children, n.Name) + if tparams := typeparams.ForFuncType(n.Type); tparams != nil { + children = append(children, tparams) + } if n.Type.Params != nil { children = append(children, n.Type.Params) } @@ -371,8 +376,13 @@ func childrenOf(n ast.Node) []ast.Node { case *ast.IndexExpr: children = append(children, - tok(n.Lbrack, len("{")), - tok(n.Rbrack, len("}"))) + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *typeparams.IndexListExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) case *ast.InterfaceType: children = append(children, @@ -581,6 +591,8 @@ func NodeDescription(n ast.Node) string { return "decrement statement" case *ast.IndexExpr: return "index expression" + case *typeparams.IndexListExpr: + return "index list expression" case *ast.InterfaceType: return "interface type" case *ast.KeyValueExpr: diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go index 5fe75b14c7..6d9ca23e2b 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -253,6 +253,10 @@ func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast. a.apply(n, "X", nil, n.X) a.apply(n, "Index", nil, n.Index) + case *typeparams.IndexListExpr: + a.apply(n, "X", nil, n.X) + a.applyList(n, "Indices") + case *ast.SliceExpr: a.apply(n, "X", nil, n.X) a.apply(n, "Low", nil, n.Low) @@ -439,13 +443,7 @@ func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast. } default: - if ix := typeparams.GetIndexExprData(n); ix != nil { - a.apply(n, "X", nil, ix.X) - // *ast.IndexExpr was handled above, so n must be an *ast.MultiIndexExpr. 
- a.applyList(n, "Indices") - } else { - panic(fmt.Sprintf("Apply: unexpected node type %T", n)) - } + panic(fmt.Sprintf("Apply: unexpected node type %T", n)) } if a.post != nil && !a.post(&a.cursor) { diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go index 9fc6b4beb8..961d036fdb 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/common.go +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -13,6 +13,7 @@ package typeparams import ( "go/ast" "go/token" + "go/types" ) // A IndexExprData holds data from both ast.IndexExpr and the new @@ -23,3 +24,9 @@ type IndexExprData struct { Indices []ast.Expr // index expressions Rbrack token.Pos // position of "]" } + +// IsTypeParam reports whether t is a type parameter. +func IsTypeParam(t types.Type) bool { + _, ok := t.(*TypeParam) + return ok +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go b/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go index 72d010e518..18212390e1 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go +++ b/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !typeparams || !go1.18 -// +build !typeparams !go1.18 +//go:build !go1.18 +// +build !go1.18 package typeparams diff --git a/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go b/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go index 642fc8ee21..d67148823c 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go +++ b/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build typeparams && go1.18 -// +build typeparams,go1.18 +//go:build go1.18 +// +build go1.18 package typeparams diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go new file mode 100644 index 0000000000..090f142a5f --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -0,0 +1,216 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "errors" + "fmt" + "go/types" + "os" + "strings" +) + +//go:generate go run copytermlist.go + +const debug = false + +var ErrEmptyTypeSet = errors.New("empty type set") + +// StructuralTerms returns a slice of terms representing the normalized +// structural type restrictions of a type parameter, if any. +// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). For example, in the declaration +// type T[P interface{~int; m()}] int +// the structural restriction of the type parameter P is ~int. +// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. 
For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// StructuralTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. +// +// Because the minimal representation always takes this form, StructuralTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the +// constraint interface is invalid, exceeds complexity bounds, or has an empty +// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet. +// +// StructuralTerms makes no guarantees about the order of terms, except that it +// is deterministic. +func StructuralTerms(tparam *TypeParam) ([]*Term, error) { + constraint := tparam.Constraint() + if constraint == nil { + return nil, fmt.Errorf("%s has nil constraint", tparam) + } + iface, _ := constraint.Underlying().(*types.Interface) + if iface == nil { + return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying()) + } + return InterfaceTermSet(iface) +} + +// InterfaceTermSet computes the normalized terms for a constraint interface, +// returning an error if the term set cannot be computed or is empty. In the +// latter case, the error will be ErrEmptyTypeSet. +// +// See the documentation of StructuralTerms for more information on +// normalization. +func InterfaceTermSet(iface *types.Interface) ([]*Term, error) { + return computeTermSet(iface) +} + +// UnionTermSet computes the normalized terms for a union, returning an error +// if the term set cannot be computed or is empty. In the latter case, the +// error will be ErrEmptyTypeSet. +// +// See the documentation of StructuralTerms for more information on +// normalization. +func UnionTermSet(union *Union) ([]*Term, error) { + return computeTermSet(union) +} + +func computeTermSet(typ types.Type) ([]*Term, error) { + tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0) + if err != nil { + return nil, err + } + if tset.terms.isEmpty() { + return nil, ErrEmptyTypeSet + } + if tset.terms.isAll() { + return nil, nil + } + var terms []*Term + for _, term := range tset.terms { + terms = append(terms, NewTerm(term.tilde, term.typ)) + } + return terms, nil +} + +// A termSet holds the normalized set of terms for a given type. +// +// The name termSet is intentionally distinct from 'type set': a type set is +// all types that implement a type (and includes method restrictions), whereas +// a term set just represents the structural restrictions on a type. +type termSet struct { + complete bool + terms termlist +} + +func indentf(depth int, format string, args ...interface{}) { + fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...) 
+} + +func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) { + if t == nil { + panic("nil type") + } + + if debug { + indentf(depth, "%s", t.String()) + defer func() { + if err != nil { + indentf(depth, "=> %s", err) + } else { + indentf(depth, "=> %s", res.terms.String()) + } + }() + } + + const maxTermCount = 100 + if tset, ok := seen[t]; ok { + if !tset.complete { + return nil, fmt.Errorf("cycle detected in the declaration of %s", t) + } + return tset, nil + } + + // Mark the current type as seen to avoid infinite recursion. + tset := new(termSet) + defer func() { + tset.complete = true + }() + seen[t] = tset + + switch u := t.Underlying().(type) { + case *types.Interface: + // The term set of an interface is the intersection of the term sets of its + // embedded types. + tset.terms = allTermlist + for i := 0; i < u.NumEmbeddeds(); i++ { + embedded := u.EmbeddedType(i) + if _, ok := embedded.Underlying().(*TypeParam); ok { + return nil, fmt.Errorf("invalid embedded type %T", embedded) + } + tset2, err := computeTermSetInternal(embedded, seen, depth+1) + if err != nil { + return nil, err + } + tset.terms = tset.terms.intersect(tset2.terms) + } + case *Union: + // The term set of a union is the union of term sets of its terms. + tset.terms = nil + for i := 0; i < u.Len(); i++ { + t := u.Term(i) + var terms termlist + switch t.Type().Underlying().(type) { + case *types.Interface: + tset2, err := computeTermSetInternal(t.Type(), seen, depth+1) + if err != nil { + return nil, err + } + terms = tset2.terms + case *TypeParam, *Union: + // A stand-alone type parameter or union is not permitted as union + // term. + return nil, fmt.Errorf("invalid union term %T", t) + default: + if t.Type() == types.Typ[types.Invalid] { + continue + } + terms = termlist{{t.Tilde(), t.Type()}} + } + tset.terms = tset.terms.union(terms) + if len(tset.terms) > maxTermCount { + return nil, fmt.Errorf("exceeded max term count %d", maxTermCount) + } + } + case *TypeParam: + panic("unreachable") + default: + // For all other types, the term set is just a single non-tilde term + // holding the type itself. + if u != types.Typ[types.Invalid] { + tset.terms = termlist{{false, t}} + } + } + return tset, nil +} + +// under is a facade for the go/types internal function of the same name. It is +// used by typeterm.go. +func under(t types.Type) types.Type { + return t.Underlying() +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go new file mode 100644 index 0000000000..10857d504c --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go @@ -0,0 +1,172 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by copytermlist.go DO NOT EDIT. + +package typeparams + +import ( + "bytes" + "go/types" +) + +// A termlist represents the type set represented by the union +// t1 ∪ y2 ∪ ... tn of the type sets of the terms t1 to tn. +// A termlist is in normal form if all terms are disjoint. +// termlist operations don't require the operands to be in +// normal form. +type termlist []*term + +// allTermlist represents the set of all types. +// It is in normal form. +var allTermlist = termlist{new(term)} + +// String prints the termlist exactly (without normalization). 
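// Illustrative sketch (not from the upstream sources): one way the
// StructuralTerms API from normalize.go above might be used. ForNamed and
// TypeParamList come from this package; the Len/At calls are assumed to
// mirror their go/types counterparts, and the *types.Named argument is
// supplied by the caller.
func exampleStructuralTerms(named *types.Named) ([]string, error) {
	tparams := ForNamed(named)
	if tparams == nil || tparams.Len() == 0 {
		return nil, nil // not a parameterized type (or pre-go1.18)
	}
	terms, err := StructuralTerms(tparams.At(0))
	if err != nil {
		return nil, err // includes ErrEmptyTypeSet for an empty type set
	}
	names := make([]string, 0, len(terms))
	for _, t := range terms {
		names = append(names, t.String()) // e.g. "~string", "int"
	}
	return names, nil
}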
+func (xl termlist) String() string { + if len(xl) == 0 { + return "∅" + } + var buf bytes.Buffer + for i, x := range xl { + if i > 0 { + buf.WriteString(" ∪ ") + } + buf.WriteString(x.String()) + } + return buf.String() +} + +// isEmpty reports whether the termlist xl represents the empty set of types. +func (xl termlist) isEmpty() bool { + // If there's a non-nil term, the entire list is not empty. + // If the termlist is in normal form, this requires at most + // one iteration. + for _, x := range xl { + if x != nil { + return false + } + } + return true +} + +// isAll reports whether the termlist xl represents the set of all types. +func (xl termlist) isAll() bool { + // If there's a 𝓤 term, the entire list is 𝓤. + // If the termlist is in normal form, this requires at most + // one iteration. + for _, x := range xl { + if x != nil && x.typ == nil { + return true + } + } + return false +} + +// norm returns the normal form of xl. +func (xl termlist) norm() termlist { + // Quadratic algorithm, but good enough for now. + // TODO(gri) fix asymptotic performance + used := make([]bool, len(xl)) + var rl termlist + for i, xi := range xl { + if xi == nil || used[i] { + continue + } + for j := i + 1; j < len(xl); j++ { + xj := xl[j] + if xj == nil || used[j] { + continue + } + if u1, u2 := xi.union(xj); u2 == nil { + // If we encounter a 𝓤 term, the entire list is 𝓤. + // Exit early. + // (Note that this is not just an optimization; + // if we continue, we may end up with a 𝓤 term + // and other terms and the result would not be + // in normal form.) + if u1.typ == nil { + return allTermlist + } + xi = u1 + used[j] = true // xj is now unioned into xi - ignore it in future iterations + } + } + rl = append(rl, xi) + } + return rl +} + +// If the type set represented by xl is specified by a single (non-𝓤) term, +// structuralType returns that type. Otherwise it returns nil. +func (xl termlist) structuralType() types.Type { + if nl := xl.norm(); len(nl) == 1 { + return nl[0].typ // if nl.isAll() then typ is nil, which is ok + } + return nil +} + +// union returns the union xl ∪ yl. +func (xl termlist) union(yl termlist) termlist { + return append(xl, yl...).norm() +} + +// intersect returns the intersection xl ∩ yl. +func (xl termlist) intersect(yl termlist) termlist { + if xl.isEmpty() || yl.isEmpty() { + return nil + } + + // Quadratic algorithm, but good enough for now. + // TODO(gri) fix asymptotic performance + var rl termlist + for _, x := range xl { + for _, y := range yl { + if r := x.intersect(y); r != nil { + rl = append(rl, r) + } + } + } + return rl.norm() +} + +// equal reports whether xl and yl represent the same type set. +func (xl termlist) equal(yl termlist) bool { + // TODO(gri) this should be more efficient + return xl.subsetOf(yl) && yl.subsetOf(xl) +} + +// includes reports whether t ∈ xl. +func (xl termlist) includes(t types.Type) bool { + for _, x := range xl { + if x.includes(t) { + return true + } + } + return false +} + +// supersetOf reports whether y ⊆ xl. +func (xl termlist) supersetOf(y *term) bool { + for _, x := range xl { + if y.subsetOf(x) { + return true + } + } + return false +} + +// subsetOf reports whether xl ⊆ yl. 
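// Illustrative sketch (not part of the generated file): how the operations
// above compose on a couple of hand-built term lists. The literals use the
// unexported term struct from typeterm.go in the same package.
func exampleTermlist() {
	intT := types.Typ[types.Int]
	strT := types.Typ[types.String]
	xl := termlist{{true, intT}, {false, intT}}  // ~int ∪ int
	yl := termlist{{false, intT}, {false, strT}} // int ∪ string
	_ = xl.norm()        // [~int]          (int is absorbed by ~int)
	_ = xl.intersect(yl) // [int]           (~int ∩ string is empty)
	_ = xl.union(yl)     // [~int ∪ string] (normalized by union)
}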
+func (xl termlist) subsetOf(yl termlist) bool { + if yl.isEmpty() { + return xl.isEmpty() + } + + // each term x of xl must be a subset of yl + for _, x := range xl { + if !yl.supersetOf(x) { + return false // x is not a subset yl + } + } + return true +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go index 12817af856..e509daf7be 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go +++ b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go @@ -2,13 +2,14 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !typeparams || !go1.18 -// +build !typeparams !go1.18 +//go:build !go1.18 +// +build !go1.18 package typeparams import ( "go/ast" + "go/token" "go/types" ) @@ -30,6 +31,34 @@ func GetIndexExprData(n ast.Node) *IndexExprData { return nil } +// PackIndexExpr returns an *ast.IndexExpr with the given index. +// Calling PackIndexExpr with len(indices) != 1 will panic. +func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr { + switch len(indices) { + case 0: + panic("empty indices") + case 1: + return &ast.IndexExpr{ + X: x, + Lbrack: lbrack, + Index: indices[0], + Rbrack: rbrack, + } + default: + panic("cannot pack multiple indices at this go version") + } +} + +// IndexListExpr is a placeholder type, as type parameters are not supported at +// this Go version. Its methods panic on use. +type IndexListExpr struct { + ast.Expr + X ast.Expr // expression + Lbrack token.Pos // position of "[" + Indices []ast.Expr // index expressions + Rbrack token.Pos // position of "]" +} + // ForTypeSpec returns an empty field list, as type parameters on not supported // at this Go version. func ForTypeSpec(*ast.TypeSpec) *ast.FieldList { @@ -46,6 +75,7 @@ func ForFuncType(*ast.FuncType) *ast.FieldList { // this Go version. Its methods panic on use. type TypeParam struct{ types.Type } +func (*TypeParam) Index() int { unsupported(); return 0 } func (*TypeParam) Constraint() types.Type { unsupported(); return nil } func (*TypeParam) Obj() *types.TypeName { unsupported(); return nil } @@ -72,42 +102,46 @@ func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) { unsupported() } +// NewSignatureType calls types.NewSignature, panicking if recvTypeParams or +// typeParams is non-empty. +func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature { + if len(recvTypeParams) != 0 || len(typeParams) != 0 { + panic("signatures cannot have type parameters at this Go version") + } + return types.NewSignature(recv, params, results, variadic) +} + // ForSignature returns an empty slice. func ForSignature(*types.Signature) *TypeParamList { return nil } -// SetForSignature panics if tparams is non-empty. -func SetForSignature(_ *types.Signature, tparams []*TypeParam) { - if len(tparams) > 0 { - unsupported() - } -} - // RecvTypeParams returns a nil slice. func RecvTypeParams(sig *types.Signature) *TypeParamList { return nil } -// SetRecvTypeParams panics if rparams is non-empty. -func SetRecvTypeParams(sig *types.Signature, rparams []*TypeParam) { - if len(rparams) > 0 { - unsupported() - } -} - // IsComparable returns false, as no interfaces are type-restricted at this Go // version. 
func IsComparable(*types.Interface) bool { return false } -// IsConstraint returns false, as no interfaces are type-restricted at this Go +// IsMethodSet returns true, as no interfaces are type-restricted at this Go // version. -func IsConstraint(*types.Interface) bool { +func IsMethodSet(*types.Interface) bool { + return true +} + +// IsImplicit returns false, as no interfaces are implicit at this Go version. +func IsImplicit(*types.Interface) bool { return false } +// MarkImplicit does nothing, because this Go version does not have implicit +// interfaces. +func MarkImplicit(*types.Interface) {} + // ForNamed returns an empty type parameter list, as type parameters are not // supported at this Go version. func ForNamed(*types.Named) *TypeParamList { @@ -131,19 +165,25 @@ func NamedTypeOrigin(named *types.Named) types.Type { return named } -// Term is a placeholder type, as type parameters are not supported at this Go -// version. Its methods panic on use. -type Term struct{} +// Term holds information about a structural type restriction. +type Term struct { + tilde bool + typ types.Type +} -func (*Term) Tilde() bool { unsupported(); return false } -func (*Term) Type() types.Type { unsupported(); return nil } -func (*Term) String() string { unsupported(); return "" } -func (*Term) Underlying() types.Type { unsupported(); return nil } +func (m *Term) Tilde() bool { return m.tilde } +func (m *Term) Type() types.Type { return m.typ } +func (m *Term) String() string { + pre := "" + if m.tilde { + pre = "~" + } + return pre + m.typ.String() +} // NewTerm is unsupported at this Go version, and panics. func NewTerm(tilde bool, typ types.Type) *Term { - unsupported() - return nil + return &Term{tilde, typ} } // Union is a placeholder type, as type parameters are not supported at this Go @@ -162,16 +202,23 @@ func NewUnion(terms []*Term) *Union { // InitInstanceInfo is a noop at this Go version. func InitInstanceInfo(*types.Info) {} -// GetInstance returns nothing, as type parameters are not supported at this Go -// version. -func GetInstance(*types.Info, *ast.Ident) (*TypeList, types.Type) { return nil, nil } +// Instance is a placeholder type, as type parameters are not supported at this +// Go version. +type Instance struct { + TypeArgs *TypeList + Type types.Type +} + +// GetInstances returns a nil map, as type parameters are not supported at this +// Go version. +func GetInstances(info *types.Info) map[*ast.Ident]Instance { return nil } -// Environment is a placeholder type, as type parameters are not supported at +// Context is a placeholder type, as type parameters are not supported at // this Go version. -type Environment struct{} +type Context struct{} // Instantiate is unsupported on this Go version, and panics. -func Instantiate(env *Environment, typ types.Type, targs []types.Type, validate bool) (types.Type, error) { +func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) { unsupported() return nil, nil } diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go index 8ab17b7779..e45896fb02 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go +++ b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go @@ -2,13 +2,14 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build typeparams && go1.18 -// +build typeparams,go1.18 +//go:build go1.18 +// +build go1.18 package typeparams import ( "go/ast" + "go/token" "go/types" ) @@ -21,6 +22,7 @@ import ( // // For nodes that don't represent index expressions, GetIndexExprData returns // nil. +// TODO(rfindley): remove this function in favor of using the alias below. func GetIndexExprData(n ast.Node) *IndexExprData { switch e := n.(type) { case *ast.IndexExpr: @@ -36,6 +38,33 @@ func GetIndexExprData(n ast.Node) *IndexExprData { return nil } +// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on +// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0 +// will panic. +func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr { + switch len(indices) { + case 0: + panic("empty indices") + case 1: + return &ast.IndexExpr{ + X: x, + Lbrack: lbrack, + Index: indices[0], + Rbrack: rbrack, + } + default: + return &ast.IndexListExpr{ + X: x, + Lbrack: lbrack, + Indices: indices, + Rbrack: rbrack, + } + } +} + +// IndexListExpr is an alias for ast.IndexListExpr. +type IndexListExpr = ast.IndexListExpr + // ForTypeSpec returns n.TypeParams. func ForTypeSpec(n *ast.TypeSpec) *ast.FieldList { if n == nil { @@ -71,34 +100,39 @@ func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) { tparam.SetConstraint(constraint) } +// NewSignatureType calls types.NewSignatureType. +func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature { + return types.NewSignatureType(recv, recvTypeParams, typeParams, params, results, variadic) +} + // ForSignature returns sig.TypeParams() func ForSignature(sig *types.Signature) *TypeParamList { return sig.TypeParams() } -// SetForSignature calls sig.SetTypeParams(tparams) -func SetForSignature(sig *types.Signature, tparams []*TypeParam) { - sig.SetTypeParams(tparams) -} - // RecvTypeParams returns sig.RecvTypeParams(). func RecvTypeParams(sig *types.Signature) *TypeParamList { return sig.RecvTypeParams() } -// SetRecvTypeParams calls sig.SetRecvTypeParams(rparams). -func SetRecvTypeParams(sig *types.Signature, rparams []*TypeParam) { - sig.SetRecvTypeParams(rparams) -} - // IsComparable calls iface.IsComparable(). func IsComparable(iface *types.Interface) bool { return iface.IsComparable() } -// IsConstraint calls iface.IsConstraint(). -func IsConstraint(iface *types.Interface) bool { - return iface.IsConstraint() +// IsMethodSet calls iface.IsMethodSet(). +func IsMethodSet(iface *types.Interface) bool { + return iface.IsMethodSet() +} + +// IsImplicit calls iface.IsImplicit(). +func IsImplicit(iface *types.Interface) bool { + return iface.IsImplicit() +} + +// MarkImplicit calls iface.MarkImplicit(). +func MarkImplicit(iface *types.Interface) { + iface.MarkImplicit() } // ForNamed extracts the (possibly empty) type parameter object list from @@ -145,21 +179,18 @@ func InitInstanceInfo(info *types.Info) { info.Instances = make(map[*ast.Ident]types.Instance) } -// GetInstance extracts information about the instantiation occurring at the -// identifier id. id should be the identifier denoting a parameterized type or -// function in an instantiation expression or function call. -func GetInstance(info *types.Info, id *ast.Ident) (*TypeList, types.Type) { - if info.Instances != nil { - inf := info.Instances[id] - return inf.TypeArgs, inf.Type - } - return nil, nil +// Instance is an alias for types.Instance. 
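// Illustrative sketch (not part of the upstream file): PackIndexExpr, defined
// above, chooses the AST node by the number of indices. The identifiers are
// made up; positions are token.NoPos for brevity.
func examplePackIndexExpr() {
	x := ast.NewIdent("Pair")
	one := PackIndexExpr(x, token.NoPos, []ast.Expr{ast.NewIdent("int")}, token.NoPos)
	two := PackIndexExpr(x, token.NoPos, []ast.Expr{ast.NewIdent("int"), ast.NewIdent("string")}, token.NoPos)
	_, isIndex := one.(*ast.IndexExpr) // true: a single index
	_, isList := two.(*IndexListExpr)  // true: multiple indices on go1.18
	_, _ = isIndex, isList
}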
+type Instance = types.Instance + +// GetInstances returns info.Instances. +func GetInstances(info *types.Info) map[*ast.Ident]Instance { + return info.Instances } -// Environment is an alias for types.Environment. -type Environment = types.Environment +// Context is an alias for types.Context. +type Context = types.Context // Instantiate calls types.Instantiate. -func Instantiate(env *Environment, typ types.Type, targs []types.Type, validate bool) (types.Type, error) { - return types.Instantiate(env, typ, targs, validate) +func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) { + return types.Instantiate(ctxt, typ, targs, validate) } diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go new file mode 100644 index 0000000000..7ddee28d98 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go @@ -0,0 +1,170 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by copytermlist.go DO NOT EDIT. + +package typeparams + +import "go/types" + +// A term describes elementary type sets: +// +// ∅: (*term)(nil) == ∅ // set of no types (empty set) +// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse) +// T: &term{false, T} == {T} // set of type T +// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t +// +type term struct { + tilde bool // valid if typ != nil + typ types.Type +} + +func (x *term) String() string { + switch { + case x == nil: + return "∅" + case x.typ == nil: + return "𝓤" + case x.tilde: + return "~" + x.typ.String() + default: + return x.typ.String() + } +} + +// equal reports whether x and y represent the same type set. +func (x *term) equal(y *term) bool { + // easy cases + switch { + case x == nil || y == nil: + return x == y + case x.typ == nil || y.typ == nil: + return x.typ == y.typ + } + // ∅ ⊂ x, y ⊂ 𝓤 + + return x.tilde == y.tilde && types.Identical(x.typ, y.typ) +} + +// union returns the union x ∪ y: zero, one, or two non-nil terms. +func (x *term) union(y *term) (_, _ *term) { + // easy cases + switch { + case x == nil && y == nil: + return nil, nil // ∅ ∪ ∅ == ∅ + case x == nil: + return y, nil // ∅ ∪ y == y + case y == nil: + return x, nil // x ∪ ∅ == x + case x.typ == nil: + return x, nil // 𝓤 ∪ y == 𝓤 + case y.typ == nil: + return y, nil // x ∪ 𝓤 == 𝓤 + } + // ∅ ⊂ x, y ⊂ 𝓤 + + if x.disjoint(y) { + return x, y // x ∪ y == (x, y) if x ∩ y == ∅ + } + // x.typ == y.typ + + // ~t ∪ ~t == ~t + // ~t ∪ T == ~t + // T ∪ ~t == ~t + // T ∪ T == T + if x.tilde || !y.tilde { + return x, nil + } + return y, nil +} + +// intersect returns the intersection x ∩ y. +func (x *term) intersect(y *term) *term { + // easy cases + switch { + case x == nil || y == nil: + return nil // ∅ ∩ y == ∅ and ∩ ∅ == ∅ + case x.typ == nil: + return y // 𝓤 ∩ y == y + case y.typ == nil: + return x // x ∩ 𝓤 == x + } + // ∅ ⊂ x, y ⊂ 𝓤 + + if x.disjoint(y) { + return nil // x ∩ y == ∅ if x ∩ y == ∅ + } + // x.typ == y.typ + + // ~t ∩ ~t == ~t + // ~t ∩ T == T + // T ∩ ~t == T + // T ∩ T == T + if !x.tilde || y.tilde { + return x + } + return y +} + +// includes reports whether t ∈ x. 
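// Illustrative sketch (not part of the generated file): the elementary term
// shapes described above and a few of the relations defined below.
func exampleTerm() {
	intT := types.Typ[types.Int]
	var empty *term             // ∅
	universe := &term{}         // 𝓤
	exact := &term{false, intT} // {int}
	tilde := &term{true, intT}  // {t | under(t) == int}
	_ = empty.subsetOf(universe) // true:  ∅ ⊆ 𝓤
	_ = exact.subsetOf(tilde)    // true:  int ⊆ ~int
	_ = tilde.subsetOf(exact)    // false: ~int ⊄ int
	_ = exact.includes(intT)     // true:  int ∈ {int}
}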
+func (x *term) includes(t types.Type) bool { + // easy cases + switch { + case x == nil: + return false // t ∈ ∅ == false + case x.typ == nil: + return true // t ∈ 𝓤 == true + } + // ∅ ⊂ x ⊂ 𝓤 + + u := t + if x.tilde { + u = under(u) + } + return types.Identical(x.typ, u) +} + +// subsetOf reports whether x ⊆ y. +func (x *term) subsetOf(y *term) bool { + // easy cases + switch { + case x == nil: + return true // ∅ ⊆ y == true + case y == nil: + return false // x ⊆ ∅ == false since x != ∅ + case y.typ == nil: + return true // x ⊆ 𝓤 == true + case x.typ == nil: + return false // 𝓤 ⊆ y == false since y != 𝓤 + } + // ∅ ⊂ x, y ⊂ 𝓤 + + if x.disjoint(y) { + return false // x ⊆ y == false if x ∩ y == ∅ + } + // x.typ == y.typ + + // ~t ⊆ ~t == true + // ~t ⊆ T == false + // T ⊆ ~t == true + // T ⊆ T == true + return !x.tilde || y.tilde +} + +// disjoint reports whether x ∩ y == ∅. +// x.typ and y.typ must not be nil. +func (x *term) disjoint(y *term) bool { + if debug && (x.typ == nil || y.typ == nil) { + panic("invalid argument(s)") + } + ux := x.typ + if y.tilde { + ux = under(ux) + } + uy := y.typ + if x.tilde { + uy = under(uy) + } + return !types.Identical(ux, uy) +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go index b47efe82b2..7ea5ced87e 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -56,12 +56,15 @@ const ( // // // The raw HTTP body is bound to this field. // google.api.HttpBody http_body = 2; +// // } // // service ResourceService { -// rpc GetResource(GetResourceRequest) returns (google.api.HttpBody); -// rpc UpdateResource(google.api.HttpBody) returns -// (google.protobuf.Empty); +// rpc GetResource(GetResourceRequest) +// returns (google.api.HttpBody); +// rpc UpdateResource(google.api.HttpBody) +// returns (google.protobuf.Empty); +// // } // // Example with streaming methods: @@ -71,6 +74,7 @@ const ( // returns (stream google.api.HttpBody); // rpc UpdateCalendar(stream google.api.HttpBody) // returns (stream google.api.HttpBody); +// // } // // Use of this type only changes how the request and response bodies are diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 3220d87be4..6ff2792ee4 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -25,55 +25,75 @@ // later release. package attributes -import "fmt" - // Attributes is an immutable struct for storing and retrieving generic // key/value pairs. Keys must be hashable, and users should define their own -// types for keys. +// types for keys. Values should not be modified after they are added to an +// Attributes or if they were received from one. If values implement 'Equal(o +// interface{}) bool', it will be called by (*Attributes).Equal to determine +// whether two values with the same key should be considered equal. type Attributes struct { m map[interface{}]interface{} } -// New returns a new Attributes containing all key/value pairs in kvs. If the -// same key appears multiple times, the last value overwrites all previous -// values for that key. Panics if len(kvs) is not even. 
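// Illustrative sketch (hypothetical key type, not part of this file): the
// single key/value constructor and the Equal semantics described above, using
// the New/WithValue/Equal signatures introduced by this change.
type exampleKey struct{}

func exampleAttributes() bool {
	a := New(exampleKey{}, "v1").WithValue(exampleKey{}, "v2") // last write wins
	b := New(exampleKey{}, "v2")
	// "v2" does not implement Equal(o interface{}) bool, so standard equality is used.
	return a.Equal(b) && a.Value(exampleKey{}) == "v2" // true
}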
-func New(kvs ...interface{}) *Attributes { - if len(kvs)%2 != 0 { - panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) - } - a := &Attributes{m: make(map[interface{}]interface{}, len(kvs)/2)} - for i := 0; i < len(kvs)/2; i++ { - a.m[kvs[i*2]] = kvs[i*2+1] - } - return a +// New returns a new Attributes containing the key/value pair. +func New(key, value interface{}) *Attributes { + return &Attributes{m: map[interface{}]interface{}{key: value}} } -// WithValues returns a new Attributes containing all key/value pairs in a and -// kvs. Panics if len(kvs) is not even. If the same key appears multiple -// times, the last value overwrites all previous values for that key. To -// remove an existing key, use a nil value. -func (a *Attributes) WithValues(kvs ...interface{}) *Attributes { +// WithValue returns a new Attributes containing the previous keys and values +// and the new key/value pair. If the same key appears multiple times, the +// last value overwrites all previous values for that key. To remove an +// existing key, use a nil value. value should not be modified later. +func (a *Attributes) WithValue(key, value interface{}) *Attributes { if a == nil { - return New(kvs...) + return New(key, value) } - if len(kvs)%2 != 0 { - panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) - } - n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+len(kvs)/2)} + n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)} for k, v := range a.m { n.m[k] = v } - for i := 0; i < len(kvs)/2; i++ { - n.m[kvs[i*2]] = kvs[i*2+1] - } + n.m[key] = value return n } // Value returns the value associated with these attributes for key, or nil if -// no value is associated with key. +// no value is associated with key. The returned value should not be modified. func (a *Attributes) Value(key interface{}) interface{} { if a == nil { return nil } return a.m[key] } + +// Equal returns whether a and o are equivalent. If 'Equal(o interface{}) +// bool' is implemented for a value in the attributes, it is called to +// determine if the value matches the one stored in the other attributes. If +// Equal is not implemented, standard equality is used to determine if the two +// values are equal. +func (a *Attributes) Equal(o *Attributes) bool { + if a == nil && o == nil { + return true + } + if a == nil || o == nil { + return false + } + if len(a.m) != len(o.m) { + return false + } + for k, v := range a.m { + ov, ok := o.m[k] + if !ok { + // o missing element of a + return false + } + if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok { + if !eq.Equal(ov) { + return false + } + } else if v != ov { + // Fallback to a standard equality check if Value is unimplemented. 
+ return false + } + } + return true +} diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index 8dd504299f..a67074a3ad 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -22,7 +22,6 @@ import ( "errors" "fmt" - "google.golang.org/grpc/attributes" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" @@ -42,7 +41,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) cc: cc, pickerBuilder: bb.pickerBuilder, - subConns: make(map[resolver.Address]subConnInfo), + subConns: resolver.NewAddressMap(), scStates: make(map[balancer.SubConn]connectivity.State), csEvltr: &balancer.ConnectivityStateEvaluator{}, config: bb.config, @@ -58,11 +57,6 @@ func (bb *baseBuilder) Name() string { return bb.name } -type subConnInfo struct { - subConn balancer.SubConn - attrs *attributes.Attributes -} - type baseBalancer struct { cc balancer.ClientConn pickerBuilder PickerBuilder @@ -70,7 +64,7 @@ type baseBalancer struct { csEvltr *balancer.ConnectivityStateEvaluator state connectivity.State - subConns map[resolver.Address]subConnInfo // `attributes` is stripped from the keys of this map (the addresses) + subConns *resolver.AddressMap scStates map[balancer.SubConn]connectivity.State picker balancer.Picker config Config @@ -81,7 +75,7 @@ type baseBalancer struct { func (b *baseBalancer) ResolverError(err error) { b.resolverErr = err - if len(b.subConns) == 0 { + if b.subConns.Len() == 0 { b.state = connectivity.TransientFailure } @@ -105,53 +99,29 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { // Successful resolution; clear resolver error and ensure we return nil. b.resolverErr = nil // addrsSet is the set converted from addrs, it's used for quick lookup of an address. - addrsSet := make(map[resolver.Address]struct{}) + addrsSet := resolver.NewAddressMap() for _, a := range s.ResolverState.Addresses { - // Strip attributes from addresses before using them as map keys. So - // that when two addresses only differ in attributes pointers (but with - // the same attribute content), they are considered the same address. - // - // Note that this doesn't handle the case where the attribute content is - // different. So if users want to set different attributes to create - // duplicate connections to the same backend, it doesn't work. This is - // fine for now, because duplicate is done by setting Metadata today. - // - // TODO: read attributes to handle duplicate connections. - aNoAttrs := a - aNoAttrs.Attributes = nil - addrsSet[aNoAttrs] = struct{}{} - if scInfo, ok := b.subConns[aNoAttrs]; !ok { + addrsSet.Set(a, nil) + if _, ok := b.subConns.Get(a); !ok { // a is a new address (not existing in b.subConns). - // - // When creating SubConn, the original address with attributes is - // passed through. So that connection configurations in attributes - // (like creds) will be used. 
sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) if err != nil { logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue } - b.subConns[aNoAttrs] = subConnInfo{subConn: sc, attrs: a.Attributes} + b.subConns.Set(a, sc) b.scStates[sc] = connectivity.Idle b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle) sc.Connect() - } else { - // Always update the subconn's address in case the attributes - // changed. - // - // The SubConn does a reflect.DeepEqual of the new and old - // addresses. So this is a noop if the current address is the same - // as the old one (including attributes). - scInfo.attrs = a.Attributes - b.subConns[aNoAttrs] = scInfo - b.cc.UpdateAddresses(scInfo.subConn, []resolver.Address{a}) } } - for a, scInfo := range b.subConns { + for _, a := range b.subConns.Keys() { + sci, _ := b.subConns.Get(a) + sc := sci.(balancer.SubConn) // a was removed by resolver. - if _, ok := addrsSet[a]; !ok { - b.cc.RemoveSubConn(scInfo.subConn) - delete(b.subConns, a) + if _, ok := addrsSet.Get(a); !ok { + b.cc.RemoveSubConn(sc) + b.subConns.Delete(a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. // The entry will be deleted in UpdateSubConnState. } @@ -193,10 +163,11 @@ func (b *baseBalancer) regeneratePicker() { readySCs := make(map[balancer.SubConn]SubConnInfo) // Filter out all ready SCs from full subConn map. - for addr, scInfo := range b.subConns { - if st, ok := b.scStates[scInfo.subConn]; ok && st == connectivity.Ready { - addr.Attributes = scInfo.attrs - readySCs[scInfo.subConn] = SubConnInfo{Address: addr} + for _, addr := range b.subConns.Keys() { + sci, _ := b.subConns.Get(addr) + sc := sci.(balancer.SubConn) + if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { + readySCs[sc] = SubConnInfo{Address: addr} } } b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go index a24264a34f..4ecfa1c215 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go @@ -39,7 +39,7 @@ type State struct { // Set returns a copy of the provided state with attributes containing s. s's // data should not be mutated after calling Set. func Set(state resolver.State, s *State) resolver.State { - state.Attributes = state.Attributes.WithValues(key, s) + state.Attributes = state.Attributes.WithValue(key, s) return state } diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 34cc4c948d..5a9e7d754f 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -23,6 +23,7 @@ import ( "errors" "fmt" "math" + "net/url" "reflect" "strings" "sync" @@ -37,7 +38,6 @@ import ( "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/grpcutil" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -248,38 +248,15 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } // Determine the resolver to use. 
- cc.parsedTarget = grpcutil.ParseTarget(cc.target, cc.dopts.copts.Dialer != nil) - channelz.Infof(logger, cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme) - resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme) - if resolverBuilder == nil { - // If resolver builder is still nil, the parsed target's scheme is - // not registered. Fallback to default resolver and set Endpoint to - // the original target. - channelz.Infof(logger, cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) - cc.parsedTarget = resolver.Target{ - Scheme: resolver.GetDefaultScheme(), - Endpoint: target, - } - resolverBuilder = cc.getResolver(cc.parsedTarget.Scheme) - if resolverBuilder == nil { - return nil, fmt.Errorf("could not get resolver for default scheme: %q", cc.parsedTarget.Scheme) - } + resolverBuilder, err := cc.parseTargetAndFindResolver() + if err != nil { + return nil, err } - - creds := cc.dopts.copts.TransportCredentials - if creds != nil && creds.Info().ServerName != "" { - cc.authority = creds.Info().ServerName - } else if cc.dopts.insecure && cc.dopts.authority != "" { - cc.authority = cc.dopts.authority - } else if strings.HasPrefix(cc.target, "unix:") || strings.HasPrefix(cc.target, "unix-abstract:") { - cc.authority = "localhost" - } else if strings.HasPrefix(cc.parsedTarget.Endpoint, ":") { - cc.authority = "localhost" + cc.parsedTarget.Endpoint - } else { - // Use endpoint from "scheme://authority/endpoint" as the default - // authority for ClientConn. - cc.authority = cc.parsedTarget.Endpoint + cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint, cc.target, cc.dopts) + if err != nil { + return nil, err } + channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) if cc.dopts.scChan != nil && !scSet { // Blocking wait for the initial service config. @@ -902,10 +879,7 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { // ac.state is Ready, try to find the connected address. var curAddrFound bool for _, a := range addrs { - // a.ServerName takes precedent over ClientConn authority, if present. - if a.ServerName == "" { - a.ServerName = ac.cc.authority - } + a.ServerName = ac.cc.getServerName(a) if reflect.DeepEqual(ac.curAddr, a) { curAddrFound = true break @@ -919,6 +893,26 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { return curAddrFound } +// getServerName determines the serverName to be used in the connection +// handshake. The default value for the serverName is the authority on the +// ClientConn, which either comes from the user's dial target or through an +// authority override specified using the WithAuthority dial option. Name +// resolvers can specify a per-address override for the serverName through the +// resolver.Address.ServerName field which is used only if the WithAuthority +// dial option was not used. The rationale is that per-address authority +// overrides specified by the name resolver can represent a security risk, while +// an override specified by the user is more dependable since they probably know +// what they are doing. 
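// Illustrative sketch (hypothetical helper, not part of this file): the
// precedence that getServerName below implements, written against plain
// strings so it can be read on its own. A WithAuthority override wins, then
// the resolver-supplied per-address ServerName, then the channel authority.
func exampleServerNamePrecedence(dialOptAuthority, addrServerName, channelAuthority string) string {
	if dialOptAuthority != "" {
		return dialOptAuthority // user override via grpc.WithAuthority
	}
	if addrServerName != "" {
		return addrServerName // set by the name resolver for this address
	}
	return channelAuthority // derived from the dial target or the creds
}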
+func (cc *ClientConn) getServerName(addr resolver.Address) string { + if cc.dopts.authority != "" { + return cc.dopts.authority + } + if addr.ServerName != "" { + return addr.ServerName + } + return cc.authority +} + func getMethodConfig(sc *ServiceConfig, method string) MethodConfig { if sc == nil { return MethodConfig{} @@ -1275,11 +1269,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne prefaceReceived := grpcsync.NewEvent() connClosed := grpcsync.NewEvent() - // addr.ServerName takes precedent over ClientConn authority, if present. - if addr.ServerName == "" { - addr.ServerName = ac.cc.authority - } - + addr.ServerName = ac.cc.getServerName(addr) hctx, hcancel := context.WithCancel(ac.ctx) hcStarted := false // protected by ac.mu @@ -1621,3 +1611,114 @@ func (cc *ClientConn) connectionError() error { defer cc.lceMu.Unlock() return cc.lastConnectionError } + +func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { + channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target) + + var rb resolver.Builder + parsedTarget, err := parseTarget(cc.target) + if err != nil { + channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) + } else { + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + rb = cc.getResolver(parsedTarget.Scheme) + if rb != nil { + cc.parsedTarget = parsedTarget + return rb, nil + } + } + + // We are here because the user's dial target did not contain a scheme or + // specified an unregistered scheme. We should fallback to the default + // scheme, except when a custom dialer is specified in which case, we should + // always use passthrough scheme. + defScheme := resolver.GetDefaultScheme() + channelz.Infof(logger, cc.channelzID, "fallback to scheme %q", defScheme) + canonicalTarget := defScheme + ":///" + cc.target + + parsedTarget, err = parseTarget(canonicalTarget) + if err != nil { + channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err) + return nil, err + } + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + rb = cc.getResolver(parsedTarget.Scheme) + if rb == nil { + return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.Scheme) + } + cc.parsedTarget = parsedTarget + return rb, nil +} + +// parseTarget uses RFC 3986 semantics to parse the given target into a +// resolver.Target struct containing scheme, authority and endpoint. Query +// params are stripped from the endpoint. +func parseTarget(target string) (resolver.Target, error) { + u, err := url.Parse(target) + if err != nil { + return resolver.Target{}, err + } + // For targets of the form "[scheme]://[authority]/endpoint, the endpoint + // value returned from url.Parse() contains a leading "/". Although this is + // in accordance with RFC 3986, we do not want to break existing resolver + // implementations which expect the endpoint without the leading "/". So, we + // end up stripping the leading "/" here. But this will result in an + // incorrect parsing for something like "unix:///path/to/socket". Since we + // own the "unix" resolver, we can workaround in the unix resolver by using + // the `URL` field instead of the `Endpoint` field. 
+ endpoint := u.Path + if endpoint == "" { + endpoint = u.Opaque + } + endpoint = strings.TrimPrefix(endpoint, "/") + return resolver.Target{ + Scheme: u.Scheme, + Authority: u.Host, + Endpoint: endpoint, + URL: *u, + }, nil +} + +// Determine channel authority. The order of precedence is as follows: +// - user specified authority override using `WithAuthority` dial option +// - creds' notion of server name for the authentication handshake +// - endpoint from dial target of the form "scheme://[authority]/endpoint" +func determineAuthority(endpoint, target string, dopts dialOptions) (string, error) { + // Historically, we had two options for users to specify the serverName or + // authority for a channel. One was through the transport credentials + // (either in its constructor, or through the OverrideServerName() method). + // The other option (for cases where WithInsecure() dial option was used) + // was to use the WithAuthority() dial option. + // + // A few things have changed since: + // - `insecure` package with an implementation of the `TransportCredentials` + // interface for the insecure case + // - WithAuthority() dial option support for secure credentials + authorityFromCreds := "" + if creds := dopts.copts.TransportCredentials; creds != nil && creds.Info().ServerName != "" { + authorityFromCreds = creds.Info().ServerName + } + authorityFromDialOption := dopts.authority + if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption { + return "", fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) + } + + switch { + case authorityFromDialOption != "": + return authorityFromDialOption, nil + case authorityFromCreds != "": + return authorityFromCreds, nil + case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"): + // TODO: remove when the unix resolver implements optional interface to + // return channel authority. + return "localhost", nil + case strings.HasPrefix(endpoint, ":"): + return "localhost" + endpoint, nil + default: + // TODO: Define an optional interface on the resolver builder to return + // the channel authority given the user's dial target. For resolvers + // which don't implement this interface, we will use the endpoint from + // "scheme://authority/endpoint" as the default authority. + return endpoint, nil + } +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 7eee7e4ec1..a671107584 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -140,6 +140,11 @@ type TransportCredentials interface { // Additionally, ClientHandshakeInfo data will be available via the context // passed to this call. // + // The second argument to this method is the `:authority` header value used + // while creating new streams on this connection after authentication + // succeeds. Implementations must use this as the server name during the + // authentication handshake. + // // If the returned net.Conn is closed, it MUST close the net.Conn provided. ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) // ServerHandshake does the authentication handshake for servers. It returns @@ -153,9 +158,13 @@ type TransportCredentials interface { Info() ProtocolInfo // Clone makes a copy of this TransportCredentials. 
Clone() TransportCredentials - // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server. - // gRPC internals also use it to override the virtual hosting name if it is set. - // It must be called before dialing. Currently, this is only used by grpclb. + // OverrideServerName specifies the value used for the following: + // - verifying the hostname on the returned certificates + // - as SNI in the client's handshake to support virtual hosting + // - as the value for `:authority` header at stream creation time + // + // Deprecated: use grpc.WithAuthority instead. Will be supported + // throughout 1.x. OverrideServerName(string) error } diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 7a497237bb..40d8ba6596 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -482,8 +482,7 @@ func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOpt } // WithAuthority returns a DialOption that specifies the value to be used as the -// :authority pseudo-header. This value only works with WithInsecure and has no -// effect if TransportCredentials are present. +// :authority pseudo-header and as the server name in authentication handshake. func WithAuthority(a string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.authority = a @@ -519,14 +518,16 @@ func WithDisableServiceConfig() DialOption { // WithDefaultServiceConfig returns a DialOption that configures the default // service config, which will be used in cases where: // -// 1. WithDisableServiceConfig is also used. -// 2. Resolver does not return a service config or if the resolver returns an -// invalid service config. +// 1. WithDisableServiceConfig is also used, or // -// Experimental +// 2. The name resolver does not provide a service config or provides an +// invalid service config. // -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// The parameter s is the JSON representation of the default service config. +// For more information about service configs, see: +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +// For a simple example of usage, see: +// examples/features/load_balancing/client/main.go func WithDefaultServiceConfig(s string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.defaultServiceConfigRawJSON = &s @@ -538,14 +539,8 @@ func WithDefaultServiceConfig(s string) DialOption { // will happen automatically if no data is written to the wire or if the RPC is // unprocessed by the remote server. // -// Retry support is currently disabled by default, but will be enabled by -// default in the future. Until then, it may be enabled by setting the -// environment variable "GRPC_GO_RETRY" to "on". -// -// Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Retry support is currently enabled by default, but may be disabled by +// setting the environment variable "GRPC_GO_RETRY" to "off". 
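// Illustrative sketch (hypothetical target and service config, not part of
// this file): combining the options documented above. WithAuthority now also
// supplies the server name for the authentication handshake, and retries are
// enabled by default unless GRPC_GO_RETRY=off or WithDisableRetry (below) is
// used. WithInsecure stands in for real credentials in this sketch only.
func exampleDialOptions() (*ClientConn, error) {
	return Dial(
		"dns:///greeter.internal:443",
		WithInsecure(),
		WithAuthority("greeter.internal"),
		WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`),
	)
}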
func WithDisableRetry() DialOption { return newFuncDialOption(func(o *dialOptions) { o.disableRetry = true diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod index 022cc9828f..fcffdceef2 100644 --- a/vendor/google.golang.org/grpc/go.mod +++ b/vendor/google.golang.org/grpc/go.mod @@ -4,7 +4,8 @@ go 1.14 require ( github.com/cespare/xxhash/v2 v2.1.1 - github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 + github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 + github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/golang/protobuf v1.4.3 diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum index 6e7ae0db2b..8b542e0beb 100644 --- a/vendor/google.golang.org/grpc/go.sum +++ b/vendor/google.golang.org/grpc/go.sum @@ -9,10 +9,13 @@ github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158 h1:CevA8fI91PAnP8vpnXuB8ZYAZ5wqY86nAbxfgK8tWO4= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 h1:hzAQntlaYRkVSFEfj9OTWlVV1H155FMD8BTKktLv0QI= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index 4ee33171e0..34098bb8eb 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -19,11 +19,14 @@ package grpclog import ( + "encoding/json" + "fmt" "io" "io/ioutil" "log" "os" "strconv" + "strings" "google.golang.org/grpc/internal/grpclog" ) @@ -95,8 +98,9 @@ var severityName = []string{ // loggerT is the default logger used by grpclog. type loggerT struct { - m []*log.Logger - v int + m []*log.Logger + v int + jsonFormat bool } // NewLoggerV2 creates a loggerV2 with the provided writers. @@ -105,19 +109,32 @@ type loggerT struct { // Warning logs will be written to warningW and infoW. // Info logs will be written to infoW. 
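// Illustrative sketch (not part of this file): wiring the writers as the doc
// comment above describes. Error output is the union of all three writers, so
// discarding infoW and errorW while pointing warningW at stderr sends
// warnings and errors to stderr exactly once and drops info. JSON output is
// selected separately, via the GRPC_GO_LOG_FORMATTER environment variable
// read in newLoggerV2 below.
func exampleLoggerV2() LoggerV2 {
	return NewLoggerV2WithVerbosity(ioutil.Discard, os.Stderr, ioutil.Discard, 2)
}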
func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { - return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0) + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{}) } // NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and // verbosity level. func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v}) +} + +type loggerV2Config struct { + verbose int + jsonFormat bool +} + +func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 { var m []*log.Logger - m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags)) + flag := log.LstdFlags + if c.jsonFormat { + flag = 0 + } + m = append(m, log.New(infoW, "", flag)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. - m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags)) - m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags)) - return &loggerT{m: m, v: v} + m = append(m, log.New(ew, "", flag)) + m = append(m, log.New(ew, "", flag)) + return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat} } // newLoggerV2 creates a loggerV2 to be used as default logger. @@ -142,58 +159,79 @@ func newLoggerV2() LoggerV2 { if vl, err := strconv.Atoi(vLevel); err == nil { v = vl } - return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v) + + jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") + + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{ + verbose: v, + jsonFormat: jsonFormat, + }) +} + +func (g *loggerT) output(severity int, s string) { + sevStr := severityName[severity] + if !g.jsonFormat { + g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + return + } + // TODO: we can also include the logging component, but that needs more + // (API) changes. + b, _ := json.Marshal(map[string]string{ + "severity": sevStr, + "message": s, + }) + g.m[severity].Output(2, string(b)) } func (g *loggerT) Info(args ...interface{}) { - g.m[infoLog].Print(args...) + g.output(infoLog, fmt.Sprint(args...)) } func (g *loggerT) Infoln(args ...interface{}) { - g.m[infoLog].Println(args...) + g.output(infoLog, fmt.Sprintln(args...)) } func (g *loggerT) Infof(format string, args ...interface{}) { - g.m[infoLog].Printf(format, args...) + g.output(infoLog, fmt.Sprintf(format, args...)) } func (g *loggerT) Warning(args ...interface{}) { - g.m[warningLog].Print(args...) + g.output(warningLog, fmt.Sprint(args...)) } func (g *loggerT) Warningln(args ...interface{}) { - g.m[warningLog].Println(args...) + g.output(warningLog, fmt.Sprintln(args...)) } func (g *loggerT) Warningf(format string, args ...interface{}) { - g.m[warningLog].Printf(format, args...) + g.output(warningLog, fmt.Sprintf(format, args...)) } func (g *loggerT) Error(args ...interface{}) { - g.m[errorLog].Print(args...) + g.output(errorLog, fmt.Sprint(args...)) } func (g *loggerT) Errorln(args ...interface{}) { - g.m[errorLog].Println(args...) + g.output(errorLog, fmt.Sprintln(args...)) } func (g *loggerT) Errorf(format string, args ...interface{}) { - g.m[errorLog].Printf(format, args...) + g.output(errorLog, fmt.Sprintf(format, args...)) } func (g *loggerT) Fatal(args ...interface{}) { - g.m[fatalLog].Fatal(args...) 
- // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) } func (g *loggerT) Fatalln(args ...interface{}) { - g.m[fatalLog].Fatalln(args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). + g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) } func (g *loggerT) Fatalf(format string, args ...interface{}) { - g.m[fatalLog].Fatalf(format, args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) } func (g *loggerT) V(l int) bool { diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index e766ac04af..9f25a67fc6 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -33,8 +33,9 @@ const ( ) var ( - // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on" or if XDS retry support is enabled. - Retry = strings.EqualFold(os.Getenv(retryStr), "on") || xdsenv.RetrySupport + // Retry is enabled unless explicitly disabled via "GRPC_GO_RETRY=off" or + // if XDS retry support is explicitly disabled. + Retry = !strings.EqualFold(os.Getenv(retryStr), "off") && xdsenv.RetrySupport // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") ) diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go new file mode 100644 index 0000000000..e2f948e8f4 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go @@ -0,0 +1,20 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcutil provides utility functions used across the gRPC codebase. +package grpcutil diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/regex.go b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go new file mode 100644 index 0000000000..2810a8ba2f --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go @@ -0,0 +1,28 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpcutil + +import "regexp" + +// FullMatchWithRegex returns whether the full string matches the regex provided. +func FullMatchWithRegex(re *regexp.Regexp, string string) bool { + re.Longest() + rem := re.FindString(string) + return len(rem) == len(string) +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/target.go b/vendor/google.golang.org/grpc/internal/grpcutil/target.go deleted file mode 100644 index 8833021da0..0000000000 --- a/vendor/google.golang.org/grpc/internal/grpcutil/target.go +++ /dev/null @@ -1,89 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcutil provides a bunch of utility functions to be used across the -// gRPC codebase. -package grpcutil - -import ( - "strings" - - "google.golang.org/grpc/resolver" -) - -// split2 returns the values from strings.SplitN(s, sep, 2). -// If sep is not found, it returns ("", "", false) instead. -func split2(s, sep string) (string, string, bool) { - spl := strings.SplitN(s, sep, 2) - if len(spl) < 2 { - return "", "", false - } - return spl[0], spl[1], true -} - -// ParseTarget splits target into a resolver.Target struct containing scheme, -// authority and endpoint. skipUnixColonParsing indicates that the parse should -// not parse "unix:[path]" cases. This should be true in cases where a custom -// dialer is present, to prevent a behavior change. -// -// If target is not a valid scheme://authority/endpoint as specified in -// https://github.com/grpc/grpc/blob/master/doc/naming.md, -// it returns {Endpoint: target}. -func ParseTarget(target string, skipUnixColonParsing bool) (ret resolver.Target) { - var ok bool - if strings.HasPrefix(target, "unix-abstract:") { - if strings.HasPrefix(target, "unix-abstract://") { - // Maybe, with Authority specified, try to parse it - var remain string - ret.Scheme, remain, _ = split2(target, "://") - ret.Authority, ret.Endpoint, ok = split2(remain, "/") - if !ok { - // No Authority, add the "//" back - ret.Endpoint = "//" + remain - } else { - // Found Authority, add the "/" back - ret.Endpoint = "/" + ret.Endpoint - } - } else { - // Without Authority specified, split target on ":" - ret.Scheme, ret.Endpoint, _ = split2(target, ":") - } - return ret - } - ret.Scheme, ret.Endpoint, ok = split2(target, "://") - if !ok { - if strings.HasPrefix(target, "unix:") && !skipUnixColonParsing { - // Handle the "unix:[local/path]" and "unix:[/absolute/path]" cases, - // because splitting on :// only handles the - // "unix://[/absolute/path]" case. Only handle if the dialer is nil, - // to avoid a behavior change with custom dialers. 
- return resolver.Target{Scheme: "unix", Endpoint: target[len("unix:"):]} - } - return resolver.Target{Endpoint: target} - } - ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") - if !ok { - return resolver.Target{Endpoint: target} - } - if ret.Scheme == "unix" { - // Add the "/" back in the unix case, so the unix resolver receives the - // actual endpoint in the "unix://[/absolute/path]" case. - ret.Endpoint = "/" + ret.Endpoint - } - return ret -} diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go index 302262613a..b8733dbf34 100644 --- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -30,14 +30,38 @@ type mdKeyType string const mdKey = mdKeyType("grpc.internal.address.metadata") +type mdValue metadata.MD + +func (m mdValue) Equal(o interface{}) bool { + om, ok := o.(mdValue) + if !ok { + return false + } + if len(m) != len(om) { + return false + } + for k, v := range m { + ov := om[k] + if len(ov) != len(v) { + return false + } + for i, ve := range v { + if ov[i] != ve { + return false + } + } + } + return true +} + // Get returns the metadata of addr. func Get(addr resolver.Address) metadata.MD { attrs := addr.Attributes if attrs == nil { return nil } - md, _ := attrs.Value(mdKey).(metadata.MD) - return md + md, _ := attrs.Value(mdKey).(mdValue) + return metadata.MD(md) } // Set sets (overrides) the metadata in addr. @@ -45,6 +69,6 @@ func Get(addr resolver.Address) metadata.MD { // When a SubConn is created with this address, the RPCs sent on it will all // have this metadata. func Set(addr resolver.Address, md metadata.MD) resolver.Address { - addr.Attributes = addr.Attributes.WithValues(mdKey, md) + addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md)) return addr } diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go index be7e13d585..c7a18a948a 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go +++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -132,7 +132,7 @@ const csKey = csKeyType("grpc.internal.resolver.configSelector") // SetConfigSelector sets the config selector in state and returns the new // state. func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State { - state.Attributes = state.Attributes.WithValues(csKey, cs) + state.Attributes = state.Attributes.WithValue(csKey, cs) return state } diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go index 0d5a811ddf..20852e59df 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -37,7 +37,17 @@ func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolv if target.Authority != "" { return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority) } - addr := resolver.Address{Addr: target.Endpoint} + + // gRPC was parsing the dial target manually before PR #4817, and we + // switched to using url.Parse() in that PR. To avoid breaking existing + // resolver implementations we ended up stripping the leading "/" from the + // endpoint. This obviously does not work for the "unix" scheme. Hence we + // end up using the parsed URL instead. 
+ endpoint := target.URL.Path + if endpoint == "" { + endpoint = target.URL.Opaque + } + addr := resolver.Address{Addr: endpoint} if b.scheme == unixAbstractScheme { // prepend "\x00" to address for unix-abstract addr.Addr = "\x00" + addr.Addr diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 45532f8aea..8394d252df 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -133,6 +133,7 @@ type cleanupStream struct { func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM type earlyAbortStream struct { + httpStatus uint32 streamID uint32 contentSubtype string status *status.Status @@ -771,9 +772,12 @@ func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { if l.side == clientSide { return errors.New("earlyAbortStream not handled on client") } - + // In case the caller forgets to set the http status, default to 200. + if eas.httpStatus == 0 { + eas.httpStatus = 200 + } headerFields := []hpack.HeaderField{ - {Name: ":status", Value: "200"}, + {Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))}, {Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)}, {Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))}, {Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())}, diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 7558630743..2521a7d7a4 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -25,6 +25,7 @@ import ( "math" "net" "net/http" + "path/filepath" "strconv" "strings" "sync" @@ -146,13 +147,20 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error address := addr.Addr networkType, ok := networktype.Get(addr) if fn != nil { + // Special handling for unix scheme with custom dialer. Back in the day, + // we did not have a unix resolver and therefore targets with a unix + // scheme would end up using the passthrough resolver. So, user's used a + // custom dialer in this case and expected the original dial target to + // be passed to the custom dialer. Now, we have a unix resolver. But if + // a custom dialer is specified, we want to retain the old behavior in + // terms of the address being passed to the custom dialer. if networkType == "unix" && !strings.HasPrefix(address, "\x00") { - // For backward compatibility, if the user dialed "unix:///path", - // the passthrough resolver would be used and the user's custom - // dialer would see "unix:///path". Since the unix resolver is used - // and the address is now "/path", prepend "unix://" so the user's - // custom dialer sees the same address. - return fn(ctx, "unix://"+address) + // Supported unix targets are either "unix://absolute-path" or + // "unix:relative-path". + if filepath.IsAbs(address) { + return fn(ctx, "unix://"+address) + } + return fn(ctx, "unix:"+address) } return fn(ctx, address) } @@ -1073,7 +1081,7 @@ func (t *http2Client) handleData(f *http2.DataFrame) { } // The server has closed the stream without sending trailers. Record that // the read direction is closed, and set the status appropriately. 
- if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { + if f.StreamEnded() { t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) } } @@ -1403,26 +1411,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { } isHeader := false - defer func() { - if t.statsHandler != nil { - if isHeader { - inHeader := &stats.InHeader{ - Client: true, - WireLength: int(frame.Header().Length), - Header: s.header.Copy(), - Compression: s.recvCompress, - } - t.statsHandler.HandleRPC(s.ctx, inHeader) - } else { - inTrailer := &stats.InTrailer{ - Client: true, - WireLength: int(frame.Header().Length), - Trailer: s.trailer.Copy(), - } - t.statsHandler.HandleRPC(s.ctx, inTrailer) - } - } - }() // If headerChan hasn't been closed yet if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { @@ -1444,6 +1432,25 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { close(s.headerChan) } + if t.statsHandler != nil { + if isHeader { + inHeader := &stats.InHeader{ + Client: true, + WireLength: int(frame.Header().Length), + Header: metadata.MD(mdata).Copy(), + Compression: s.recvCompress, + } + t.statsHandler.HandleRPC(s.ctx, inHeader) + } else { + inTrailer := &stats.InTrailer{ + Client: true, + WireLength: int(frame.Header().Length), + Trailer: metadata.MD(mdata).Copy(), + } + t.statsHandler.HandleRPC(s.ctx, inTrailer) + } + } + if !endStream { return } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 19c13e041d..f2cad9ebc3 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -129,7 +129,7 @@ type http2Server struct { // options from config. // // It returns a non-nil transport and a nil error on success. On failure, it -// returns a non-nil transport and a nil-error. For a special case where the +// returns a nil transport and a non-nil error. For a special case where the // underlying conn gets closed before the client preface could be read, it // returns a nil transport and a nil error. func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { @@ -290,10 +290,11 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if _, err := io.ReadFull(t.conn, preface); err != nil { // In deployments where a gRPC server runs behind a cloud load balancer // which performs regular TCP level health checks, the connection is - // closed immediately by the latter. Skipping the error here will help - // reduce log clutter. + // closed immediately by the latter. Returning io.EOF here allows the + // grpc server implementation to recognize this scenario and suppress + // logging to reduce spam. if err == io.EOF { - return nil, nil + return nil, io.EOF } return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) } @@ -390,6 +391,13 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if timeout, err = decodeTimeout(hf.Value); err != nil { headerError = true } + // "Transports must consider requests containing the Connection header + // as malformed." 
- A41 + case "connection": + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec") + } + headerError = true default: if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { break @@ -404,6 +412,25 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } } + // "If multiple Host headers or multiple :authority headers are present, the + // request must be rejected with an HTTP status code 400 as required by Host + // validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM + // with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is a HTTP/2 + // error, this takes precedence over a client not speaking gRPC. + if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 { + errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"])) + if logger.V(logLevel) { + logger.Errorf("transport: %v", errMsg) + } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 400, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + }) + return false + } + if !isGRPC || headerError { t.controlBuf.put(&cleanupStream{ streamID: streamID, @@ -414,6 +441,19 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( return false } + // "If :authority is missing, Host must be renamed to :authority." - A41 + if len(mdata[":authority"]) == 0 { + // No-op if host isn't present, no eventual :authority header is a valid + // RPC. + if host, ok := mdata["host"]; ok { + mdata[":authority"] = host + delete(mdata, "host") + } + } else { + // "If :authority is present, Host must be discarded" - A41 + delete(mdata, "host") + } + if frame.StreamEnded() { // s is just created by the caller. No lock needed. s.state = streamReadDone @@ -494,6 +534,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( stat = status.New(codes.PermissionDenied, err.Error()) } t.controlBuf.put(&earlyAbortStream{ + httpStatus: 200, streamID: s.id, contentSubtype: s.contentSubtype, status: stat, @@ -734,7 +775,7 @@ func (t *http2Server) handleData(f *http2.DataFrame) { s.write(recvMsg{buffer: buffer}) } } - if f.Header().Flags.Has(http2.FlagDataEndStream) { + if f.StreamEnded() { // Received the end of stream from the client. s.compareAndSwapState(streamActive, streamReadDone) s.write(recvMsg{err: io.EOF}) diff --git a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go index 7bb53cff10..c11b527827 100644 --- a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go +++ b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go @@ -31,7 +31,7 @@ const key = keyType("grpc.internal.transport.networktype") // Set returns a copy of the provided address with attributes containing networkType. 
func Set(address resolver.Address, networkType string) resolver.Address { - address.Attributes = address.Attributes.WithValues(key, networkType) + address.Attributes = address.Attributes.WithValue(key, networkType) return address } diff --git a/vendor/google.golang.org/grpc/internal/xds/env/env.go b/vendor/google.golang.org/grpc/internal/xds/env/env.go index b171ac91f1..87d3c2433a 100644 --- a/vendor/google.golang.org/grpc/internal/xds/env/env.go +++ b/vendor/google.golang.org/grpc/internal/xds/env/env.go @@ -43,7 +43,7 @@ const ( clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" retrySupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RETRY" - rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RBAC" + rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC" c2pResolverSupportEnv = "GRPC_EXPERIMENTAL_GOOGLE_C2P_RESOLVER" c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI" @@ -83,8 +83,10 @@ var ( // RetrySupport indicates whether xDS retry is enabled. RetrySupport = !strings.EqualFold(os.Getenv(retrySupportEnv), "false") - // RBACSupport indicates whether xDS configured RBAC HTTP Filter is enabled. - RBACSupport = strings.EqualFold(os.Getenv(rbacSupportEnv), "true") + // RBACSupport indicates whether xDS configured RBAC HTTP Filter is enabled, + // which can be disabled by setting the environment variable + // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". + RBACSupport = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false") // C2PResolverSupport indicates whether support for C2P resolver is enabled. // This can be enabled by setting the environment variable diff --git a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go index 3677c3f04f..e8b492774d 100644 --- a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go +++ b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go @@ -28,7 +28,7 @@ type handshakeClusterNameKey struct{} // SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field // is updated with the cluster name. func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address { - addr.Attributes = addr.Attributes.WithValues(handshakeClusterNameKey{}, clusterName) + addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName) return addr } diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 0878ada9db..e8367cb899 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -144,7 +144,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. acw, ok := pickResult.SubConn.(*acBalancerWrapper) if !ok { - logger.Error("subconn returned from pick is not *acBalancerWrapper") + logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn) continue } if t := acw.getAddrConn().getReadyTransport(); t != nil { diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go new file mode 100644 index 0000000000..e87ecd0eeb --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -0,0 +1,109 @@ +/* + * + * Copyright 2021 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package resolver + +type addressMapEntry struct { + addr Address + value interface{} +} + +// AddressMap is a map of addresses to arbitrary values taking into account +// Attributes. BalancerAttributes are ignored, as are Metadata and Type. +// Multiple accesses may not be performed concurrently. Must be created via +// NewAddressMap; do not construct directly. +type AddressMap struct { + m map[string]addressMapEntryList +} + +type addressMapEntryList []*addressMapEntry + +// NewAddressMap creates a new AddressMap. +func NewAddressMap() *AddressMap { + return &AddressMap{m: make(map[string]addressMapEntryList)} +} + +// find returns the index of addr in the addressMapEntry slice, or -1 if not +// present. +func (l addressMapEntryList) find(addr Address) int { + if len(l) == 0 { + return -1 + } + for i, entry := range l { + if entry.addr.ServerName == addr.ServerName && + entry.addr.Attributes.Equal(addr.Attributes) { + return i + } + } + return -1 +} + +// Get returns the value for the address in the map, if present. +func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { + entryList := a.m[addr.Addr] + if entry := entryList.find(addr); entry != -1 { + return entryList[entry].value, true + } + return nil, false +} + +// Set updates or adds the value to the address in the map. +func (a *AddressMap) Set(addr Address, value interface{}) { + entryList := a.m[addr.Addr] + if entry := entryList.find(addr); entry != -1 { + a.m[addr.Addr][entry].value = value + return + } + a.m[addr.Addr] = append(a.m[addr.Addr], &addressMapEntry{addr: addr, value: value}) +} + +// Delete removes addr from the map. +func (a *AddressMap) Delete(addr Address) { + entryList := a.m[addr.Addr] + entry := entryList.find(addr) + if entry == -1 { + return + } + if len(entryList) == 1 { + entryList = nil + } else { + copy(entryList[entry:], entryList[entry+1:]) + entryList = entryList[:len(entryList)-1] + } + a.m[addr.Addr] = entryList +} + +// Len returns the number of entries in the map. +func (a *AddressMap) Len() int { + ret := 0 + for _, entryList := range a.m { + ret += len(entryList) + } + return ret +} + +// Keys returns a slice of all current map keys. +func (a *AddressMap) Keys() []Address { + ret := make([]Address, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.addr) + } + } + return ret +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 6a9d234a59..e28b680260 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -23,6 +23,7 @@ package resolver import ( "context" "net" + "net/url" "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" @@ -116,9 +117,14 @@ type Address struct { ServerName string // Attributes contains arbitrary data about this address intended for - // consumption by the load balancing policy. 
+ // consumption by the SubConn. Attributes *attributes.Attributes + // BalancerAttributes contains arbitrary data about this address intended + // for consumption by the LB policy. These attribes do not affect SubConn + // creation, connection establishment, handshaking, etc. + BalancerAttributes *attributes.Attributes + // Type is the type of this address. // // Deprecated: use Attributes instead. @@ -131,6 +137,15 @@ type Address struct { Metadata interface{} } +// Equal returns whether a and o are identical. Metadata is compared directly, +// not with any recursive introspection. +func (a *Address) Equal(o Address) bool { + return a.Addr == o.Addr && a.ServerName == o.ServerName && + a.Attributes.Equal(o.Attributes) && + a.BalancerAttributes.Equal(o.BalancerAttributes) && + a.Type == o.Type && a.Metadata == o.Metadata +} + // BuildOptions includes additional information for the builder to create // the resolver. type BuildOptions struct { @@ -204,25 +219,36 @@ type ClientConn interface { // Target represents a target for gRPC, as specified in: // https://github.com/grpc/grpc/blob/master/doc/naming.md. -// It is parsed from the target string that gets passed into Dial or DialContext by the user. And -// grpc passes it to the resolver and the balancer. +// It is parsed from the target string that gets passed into Dial or DialContext +// by the user. And gRPC passes it to the resolver and the balancer. // -// If the target follows the naming spec, and the parsed scheme is registered with grpc, we will -// parse the target string according to the spec. e.g. "dns://some_authority/foo.bar" will be parsed -// into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// If the target follows the naming spec, and the parsed scheme is registered +// with gRPC, we will parse the target string according to the spec. If the +// target does not contain a scheme or if the parsed scheme is not registered +// (i.e. no corresponding resolver available to resolve the endpoint), we will +// apply the default scheme, and will attempt to reparse it. // -// If the target does not contain a scheme, we will apply the default scheme, and set the Target to -// be the full target string. e.g. "foo.bar" will be parsed into -// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}. +// Examples: // -// If the parsed scheme is not registered (i.e. no corresponding resolver available to resolve the -// endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target -// string. e.g. target string "unknown_scheme://authority/endpoint" will be parsed into -// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}. +// - "dns://some_authority/foo.bar" +// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// - "foo.bar" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} +// - "unknown_scheme://authority/endpoint" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} type Target struct { - Scheme string + // Deprecated: use URL.Scheme instead. + Scheme string + // Deprecated: use URL.Host instead. Authority string - Endpoint string + // Deprecated: use URL.Path or URL.Opaque instead. The latter is set when + // the former is empty. + Endpoint string + // URL contains the parsed dial target with an optional default scheme added + // to it if the original dial target contained no scheme or contained an + // unregistered scheme. 
Any query params specified in the original dial + // target can be accessed from here. + URL url.URL } // Builder creates a resolver that will be used to watch name resolution updates. diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 557f29559d..eadf9e05fd 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -885,13 +885,11 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { // ErrConnDispatched means that the connection was dispatched away from // gRPC; those connections should be left open. if err != credentials.ErrConnDispatched { - c.Close() - } - // Don't log on ErrConnDispatched and io.EOF to prevent log spam. - if err != credentials.ErrConnDispatched { + // Don't log on ErrConnDispatched and io.EOF to prevent log spam. if err != io.EOF { channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) } + c.Close() } return nil } @@ -1106,16 +1104,21 @@ func chainUnaryServerInterceptors(s *Server) { func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { - var i int - var next UnaryHandler - next = func(ctx context.Context, req interface{}) (interface{}, error) { - if i == len(interceptors)-1 { - return interceptors[i](ctx, req, info, handler) + // the struct ensures the variables are allocated together, rather than separately, since we + // know they should be garbage collected together. This saves 1 allocation and decreases + // time/call by about 10% on the microbenchmark. + var state struct { + i int + next UnaryHandler + } + state.next = func(ctx context.Context, req interface{}) (interface{}, error) { + if state.i == len(interceptors)-1 { + return interceptors[state.i](ctx, req, info, handler) } - i++ - return interceptors[i-1](ctx, req, info, next) + state.i++ + return interceptors[state.i-1](ctx, req, info, state.next) } - return next(ctx, req) + return state.next(ctx, req) } } @@ -1391,16 +1394,21 @@ func chainStreamServerInterceptors(s *Server) { func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { - var i int - var next StreamHandler - next = func(srv interface{}, ss ServerStream) error { - if i == len(interceptors)-1 { - return interceptors[i](srv, ss, info, handler) + // the struct ensures the variables are allocated together, rather than separately, since we + // know they should be garbage collected together. This saves 1 allocation and decreases + // time/call by about 10% on the microbenchmark. 
+ var state struct { + i int + next StreamHandler + } + state.next = func(srv interface{}, ss ServerStream) error { + if state.i == len(interceptors)-1 { + return interceptors[state.i](srv, ss, info, handler) } - i++ - return interceptors[i-1](srv, ss, info, next) + state.i++ + return interceptors[state.i-1](srv, ss, info, state.next) } - return next(srv, ss) + return state.next(srv, ss) } } diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index 54d187186b..af2cffe985 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -73,11 +73,16 @@ func FromProto(s *spb.Status) *Status { return status.FromProto(s) } -// FromError returns a Status representing err if it was produced by this -// package or has a method `GRPCStatus() *Status`. -// If err is nil, a Status is returned with codes.OK and no message. -// Otherwise, ok is false and a Status is returned with codes.Unknown and -// the original error message. +// FromError returns a Status representation of err. +// +// - If err was produced by this package or implements the method `GRPCStatus() +// *Status`, the appropriate Status is returned. +// +// - If err is nil, a Status is returned with codes.OK and no message. +// +// - Otherwise, err is an error not compatible with this package. In this +// case, a Status is returned with codes.Unknown and err's Error() message, +// and ok is false. func FromError(err error) (s *Status, ok bool) { if err == nil { return nil, true diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 48594bc246..1a5fd584af 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.41.0" +const Version = "1.42.0" diff --git a/vendor/gopkg.in/ini.v1/.golangci.yml b/vendor/gopkg.in/ini.v1/.golangci.yml new file mode 100644 index 0000000000..b7256bae13 --- /dev/null +++ b/vendor/gopkg.in/ini.v1/.golangci.yml @@ -0,0 +1,21 @@ +linters-settings: + nakedret: + max-func-lines: 0 # Disallow any unnamed return statement + +linters: + enable: + - deadcode + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - structcheck + - typecheck + - unused + - varcheck + - nakedret + - gofmt + - rowserrcheck + - unconvert + - goimports diff --git a/vendor/gopkg.in/ini.v1/codecov.yml b/vendor/gopkg.in/ini.v1/codecov.yml index fc947f2308..31f646ee06 100644 --- a/vendor/gopkg.in/ini.v1/codecov.yml +++ b/vendor/gopkg.in/ini.v1/codecov.yml @@ -6,4 +6,4 @@ coverage: threshold: 1% comment: - layout: 'diff, files' + layout: 'diff' diff --git a/vendor/gopkg.in/ini.v1/ini.go b/vendor/gopkg.in/ini.v1/ini.go index 23f07422ef..ac2a93a5b3 100644 --- a/vendor/gopkg.in/ini.v1/ini.go +++ b/vendor/gopkg.in/ini.v1/ini.go @@ -1,5 +1,3 @@ -// +build go1.6 - // Copyright 2014 Unknwon // // Licensed under the Apache License, Version 2.0 (the "License"): you may @@ -125,6 +123,8 @@ type LoadOptions struct { ReaderBufferSize int // AllowNonUniqueSections indicates whether to allow sections with the same name multiple times. AllowNonUniqueSections bool + // AllowDuplicateShadowValues indicates whether values for shadowed keys should be deduplicated. + AllowDuplicateShadowValues bool } // DebugFunc is the type of function called to log parse events. 
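The ini.go hunk above adds a new AllowDuplicateShadowValues load option to gopkg.in/ini.v1, and the key.go hunk that follows wires it into addShadow so identical repeated values are no longer silently dropped. What follows is an illustrative sketch only, not part of this patch: a minimal downstream program exercising the new option, with the INI content, section, and key names invented for the example.

package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	// Hypothetical input with a repeated key and a duplicate value.
	src := []byte("[servers]\naddr = 10.0.0.1\naddr = 10.0.0.1\naddr = 10.0.0.2\n")

	f, err := ini.LoadSources(ini.LoadOptions{
		AllowShadows:               true, // collect repeated keys as shadow values
		AllowDuplicateShadowValues: true, // new option: skip deduplication of identical values
	}, src)
	if err != nil {
		panic(err)
	}

	// Prints all three values, including the duplicate "10.0.0.1";
	// without AllowDuplicateShadowValues the duplicate would be discarded.
	fmt.Println(f.Section("servers").Key("addr").ValueWithShadows())
}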
diff --git a/vendor/gopkg.in/ini.v1/key.go b/vendor/gopkg.in/ini.v1/key.go index 8baafd9ea6..0302c291bf 100644 --- a/vendor/gopkg.in/ini.v1/key.go +++ b/vendor/gopkg.in/ini.v1/key.go @@ -54,14 +54,16 @@ func (k *Key) addShadow(val string) error { return errors.New("cannot add shadow to auto-increment or boolean key") } - // Deduplicate shadows based on their values. - if k.value == val { - return nil - } - for i := range k.shadows { - if k.shadows[i].value == val { + if !k.s.f.options.AllowDuplicateShadowValues { + // Deduplicate shadows based on their values. + if k.value == val { return nil } + for i := range k.shadows { + if k.shadows[i].value == val { + return nil + } + } } shadow := newKey(k.s, k.name, val) @@ -781,10 +783,8 @@ func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]u return vals, err } - type Parser func(str string) (interface{}, error) - // parseTimesFormat transforms strings to times in given format. func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { vals := make([]time.Time, 0, len(strs)) @@ -801,7 +801,6 @@ func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnO return vals, err } - // doParse transforms strings to different types func (k *Key) doParse(strs []string, addInvalid, returnOnInvalid bool, parser Parser) ([]interface{}, error) { vals := make([]interface{}, 0, len(strs)) diff --git a/vendor/gopkg.in/ini.v1/parser.go b/vendor/gopkg.in/ini.v1/parser.go index 65147166f9..b8b5aa86a3 100644 --- a/vendor/gopkg.in/ini.v1/parser.go +++ b/vendor/gopkg.in/ini.v1/parser.go @@ -131,7 +131,7 @@ func readKeyName(delimiters string, in []byte) (string, int, error) { // Check if key name surrounded by quotes. var keyQuote string if line[0] == '"' { - if len(line) > 6 && string(line[0:3]) == `"""` { + if len(line) > 6 && line[0:3] == `"""` { keyQuote = `"""` } else { keyQuote = `"` @@ -232,7 +232,7 @@ func (p *parser) readValue(in []byte, bufferSize int) (string, error) { } var valQuote string - if len(line) > 3 && string(line[0:3]) == `"""` { + if len(line) > 3 && line[0:3] == `"""` { valQuote = `"""` } else if line[0] == '`' { valQuote = "`" @@ -289,12 +289,8 @@ func (p *parser) readValue(in []byte, bufferSize int) (string, error) { hasSurroundedQuote(line, '"')) && !p.options.PreserveSurroundedQuote { line = line[1 : len(line)-1] } else if len(valQuote) == 0 && p.options.UnescapeValueCommentSymbols { - if strings.Contains(line, `\;`) { - line = strings.Replace(line, `\;`, ";", -1) - } - if strings.Contains(line, `\#`) { - line = strings.Replace(line, `\#`, "#", -1) - } + line = strings.ReplaceAll(line, `\;`, ";") + line = strings.ReplaceAll(line, `\#`, "#") } else if p.options.AllowPythonMultilineValues && lastChar == '\n' { return p.readPythonMultilines(line, bufferSize) } diff --git a/vendor/gopkg.in/ini.v1/section.go b/vendor/gopkg.in/ini.v1/section.go index afaa97c97e..a3615d820b 100644 --- a/vendor/gopkg.in/ini.v1/section.go +++ b/vendor/gopkg.in/ini.v1/section.go @@ -217,7 +217,7 @@ func (s *Section) KeysHash() map[string]string { defer s.f.lock.RUnlock() } - hash := map[string]string{} + hash := make(map[string]string, len(s.keysHash)) for key, value := range s.keysHash { hash[key] = value } diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/README.md b/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/README.md new file mode 100644 index 0000000000..096f99e1d2 --- /dev/null +++ 
b/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/README.md @@ -0,0 +1,56 @@ +# Azure Active Directory plugin for client authentication + +This plugin provides an integration with Azure Active Directory device flow. If no tokens are present in the kubectl configuration, it will prompt a device code which can be used to login in a browser. After login it will automatically fetch the tokens and store them in the kubectl configuration. In addition it will refresh and update the tokens in the configuration when expired. + +## Usage + +1. Create an Azure Active Directory *Web App / API* application for `apiserver` following these [instructions](https://docs.microsoft.com/en-us/azure/active-directory/active-directory-app-registration). The callback URL does not matter (just cannot be empty). + +2. Create a second Azure Active Directory native application for `kubectl`. The callback URL does not matter (just cannot be empty). + +3. On `kubectl` application's configuration page in Azure portal grant permissions to `apiserver` application by clicking on *Required Permissions*, click the *Add* button and search for the apiserver application created in step 1. Select "Access apiserver" under the *DELEGATED PERMISSIONS*. Once added click the *Grant Permissions* button to apply the changes. + +4. Configure the `apiserver` to use the Azure Active Directory as an OIDC provider with following options + + ``` + --oidc-client-id="spn:APISERVER_APPLICATION_ID" \ + --oidc-issuer-url="https://sts.windows.net/TENANT_ID/" + --oidc-username-claim="sub" + ``` + + * Replace the `APISERVER_APPLICATION_ID` with the application ID of `apiserver` application + * Replace `TENANT_ID` with your tenant ID. +   * For a list of alternative username claims that are supported by the OIDC issuer check the JSON response at `https://sts.windows.net/TENANT_ID/.well-known/openid-configuration`. + +5. Configure `kubectl` to use the `azure` authentication provider + + ``` + kubectl config set-credentials "USER_NAME" --auth-provider=azure \ + --auth-provider-arg=environment=AzurePublicCloud \ + --auth-provider-arg=client-id=APPLICATION_ID \ + --auth-provider-arg=tenant-id=TENANT_ID \ + --auth-provider-arg=apiserver-id=APISERVER_APPLICATION_ID + ``` + + * Supported environments: `AzurePublicCloud`, `AzureUSGovernmentCloud`, `AzureChinaCloud`, `AzureGermanCloud` + * Replace `USER_NAME` and `TENANT_ID` with your user name and tenant ID + * Replace `APPLICATION_ID` with the application ID of your`kubectl` application ID + * Replace `APISERVER_APPLICATION_ID` with the application ID of your `apiserver` application ID + * Be sure to also (create and) select a context that uses above user + +6. (Optionally) the AAD token has `aud` claim with `spn:` prefix. To omit that, add following auth configuration: + + ``` + --auth-provider-arg=config-mode="1" + ``` + + 7. The access token is acquired when first `kubectl` command is executed + + ``` + kubectl get pods + + To sign in, use a web browser to open the page https://aka.ms/devicelogin and enter the code DEC7D48GA to authenticate. + ``` + + * After signing in a web browser, the token is stored in the configuration, and it will be reused when executing further commands. + * The resulting username in Kubernetes depends on your [configuration of the `--oidc-username-claim` and `--oidc-username-prefix` flags on the API server](https://kubernetes.io/docs/admin/authentication/#configuring-the-api-server). If you are using any authorization method you need to give permissions to that user, e.g. 
by binding the user to a role in the case of RBAC. diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/azure.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/azure.go new file mode 100644 index 0000000000..a5218029bb --- /dev/null +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/azure.go @@ -0,0 +1,469 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "strconv" + "sync" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/azure" + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/util/net" + restclient "k8s.io/client-go/rest" +) + +type configMode int + +const ( + azureTokenKey = "azureTokenKey" + tokenType = "Bearer" + authHeader = "Authorization" + + cfgClientID = "client-id" + cfgTenantID = "tenant-id" + cfgAccessToken = "access-token" + cfgRefreshToken = "refresh-token" + cfgExpiresIn = "expires-in" + cfgExpiresOn = "expires-on" + cfgEnvironment = "environment" + cfgApiserverID = "apiserver-id" + cfgConfigMode = "config-mode" + + configModeDefault configMode = 0 + configModeOmitSPNPrefix configMode = 1 +) + +func init() { + if err := restclient.RegisterAuthProviderPlugin("azure", newAzureAuthProvider); err != nil { + klog.Fatalf("Failed to register azure auth plugin: %v", err) + } +} + +var cache = newAzureTokenCache() + +type azureTokenCache struct { + lock sync.Mutex + cache map[string]*azureToken +} + +func newAzureTokenCache() *azureTokenCache { + return &azureTokenCache{cache: make(map[string]*azureToken)} +} + +func (c *azureTokenCache) getToken(tokenKey string) *azureToken { + c.lock.Lock() + defer c.lock.Unlock() + return c.cache[tokenKey] +} + +func (c *azureTokenCache) setToken(tokenKey string, token *azureToken) { + c.lock.Lock() + defer c.lock.Unlock() + c.cache[tokenKey] = token +} + +func newAzureAuthProvider(_ string, cfg map[string]string, persister restclient.AuthProviderConfigPersister) (restclient.AuthProvider, error) { + var ( + ts tokenSource + environment azure.Environment + err error + mode configMode + ) + + environment, err = azure.EnvironmentFromName(cfg[cfgEnvironment]) + if err != nil { + environment = azure.PublicCloud + } + + mode = configModeDefault + if cfg[cfgConfigMode] != "" { + configModeInt, err := strconv.Atoi(cfg[cfgConfigMode]) + if err != nil { + return nil, fmt.Errorf("failed to parse %s, error: %s", cfgConfigMode, err) + } + mode = configMode(configModeInt) + switch mode { + case configModeOmitSPNPrefix: + case configModeDefault: + default: + return nil, fmt.Errorf("%s:%s is not a valid mode", cfgConfigMode, cfg[cfgConfigMode]) + } + } + ts, err = newAzureTokenSourceDeviceCode(environment, cfg[cfgClientID], cfg[cfgTenantID], cfg[cfgApiserverID], mode) + if err != nil { + return nil, fmt.Errorf("creating a new azure token source for device code authentication: %v", err) + } + cacheSource := newAzureTokenSource(ts, cache, cfg, 
mode, persister) + + return &azureAuthProvider{ + tokenSource: cacheSource, + }, nil +} + +type azureAuthProvider struct { + tokenSource tokenSource +} + +func (p *azureAuthProvider) Login() error { + return errors.New("not yet implemented") +} + +func (p *azureAuthProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper { + return &azureRoundTripper{ + tokenSource: p.tokenSource, + roundTripper: rt, + } +} + +type azureRoundTripper struct { + tokenSource tokenSource + roundTripper http.RoundTripper +} + +var _ net.RoundTripperWrapper = &azureRoundTripper{} + +func (r *azureRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if len(req.Header.Get(authHeader)) != 0 { + return r.roundTripper.RoundTrip(req) + } + + token, err := r.tokenSource.Token() + if err != nil { + klog.Errorf("Failed to acquire a token: %v", err) + return nil, fmt.Errorf("acquiring a token for authorization header: %v", err) + } + + // clone the request in order to avoid modifying the headers of the original request + req2 := new(http.Request) + *req2 = *req + req2.Header = make(http.Header, len(req.Header)) + for k, s := range req.Header { + req2.Header[k] = append([]string(nil), s...) + } + + req2.Header.Set(authHeader, fmt.Sprintf("%s %s", tokenType, token.token.AccessToken)) + + return r.roundTripper.RoundTrip(req2) +} + +func (r *azureRoundTripper) WrappedRoundTripper() http.RoundTripper { return r.roundTripper } + +type azureToken struct { + token adal.Token + environment string + clientID string + tenantID string + apiserverID string +} + +type tokenSource interface { + Token() (*azureToken, error) + Refresh(*azureToken) (*azureToken, error) +} + +type azureTokenSource struct { + source tokenSource + cache *azureTokenCache + lock sync.Mutex + configMode configMode + cfg map[string]string + persister restclient.AuthProviderConfigPersister +} + +func newAzureTokenSource(source tokenSource, cache *azureTokenCache, cfg map[string]string, configMode configMode, persister restclient.AuthProviderConfigPersister) tokenSource { + return &azureTokenSource{ + source: source, + cache: cache, + cfg: cfg, + persister: persister, + configMode: configMode, + } +} + +// Token fetches a token from the cache of configuration if present otherwise +// acquires a new token from the configured source. Automatically refreshes +// the token if expired. 
+func (ts *azureTokenSource) Token() (*azureToken, error) { + ts.lock.Lock() + defer ts.lock.Unlock() + + var err error + token := ts.cache.getToken(azureTokenKey) + + if token != nil && !token.token.IsExpired() { + return token, nil + } + + // retrieve from config if no cache + if token == nil { + tokenFromCfg, err := ts.retrieveTokenFromCfg() + + if err == nil { + token = tokenFromCfg + } + } + + if token != nil { + // cache and return if the token is as good + // avoids frequent persistor calls + if !token.token.IsExpired() { + ts.cache.setToken(azureTokenKey, token) + return token, nil + } + + klog.V(4).Info("Refreshing token.") + tokenFromRefresh, err := ts.Refresh(token) + switch { + case err == nil: + token = tokenFromRefresh + case autorest.IsTokenRefreshError(err): + klog.V(4).Infof("Failed to refresh expired token, proceed to auth: %v", err) + // reset token to nil so that the token source will be used to acquire new + token = nil + default: + return nil, fmt.Errorf("unexpected error when refreshing token: %v", err) + } + } + + if token == nil { + tokenFromSource, err := ts.source.Token() + if err != nil { + return nil, fmt.Errorf("failed acquiring new token: %v", err) + } + token = tokenFromSource + } + + // sanity check + if token == nil { + return nil, fmt.Errorf("unable to acquire token") + } + + // corner condition, newly got token is valid but expired + if token.token.IsExpired() { + return nil, fmt.Errorf("newly acquired token is expired") + } + + err = ts.storeTokenInCfg(token) + if err != nil { + return nil, fmt.Errorf("storing the refreshed token in configuration: %v", err) + } + ts.cache.setToken(azureTokenKey, token) + + return token, nil +} + +func (ts *azureTokenSource) retrieveTokenFromCfg() (*azureToken, error) { + accessToken := ts.cfg[cfgAccessToken] + if accessToken == "" { + return nil, fmt.Errorf("no access token in cfg: %s", cfgAccessToken) + } + refreshToken := ts.cfg[cfgRefreshToken] + if refreshToken == "" { + return nil, fmt.Errorf("no refresh token in cfg: %s", cfgRefreshToken) + } + environment := ts.cfg[cfgEnvironment] + if environment == "" { + return nil, fmt.Errorf("no environment in cfg: %s", cfgEnvironment) + } + clientID := ts.cfg[cfgClientID] + if clientID == "" { + return nil, fmt.Errorf("no client ID in cfg: %s", cfgClientID) + } + tenantID := ts.cfg[cfgTenantID] + if tenantID == "" { + return nil, fmt.Errorf("no tenant ID in cfg: %s", cfgTenantID) + } + resourceID := ts.cfg[cfgApiserverID] + if resourceID == "" { + return nil, fmt.Errorf("no apiserver ID in cfg: %s", cfgApiserverID) + } + expiresIn := ts.cfg[cfgExpiresIn] + if expiresIn == "" { + return nil, fmt.Errorf("no expiresIn in cfg: %s", cfgExpiresIn) + } + expiresOn := ts.cfg[cfgExpiresOn] + if expiresOn == "" { + return nil, fmt.Errorf("no expiresOn in cfg: %s", cfgExpiresOn) + } + tokenAudience := resourceID + if ts.configMode == configModeDefault { + tokenAudience = fmt.Sprintf("spn:%s", resourceID) + } + + return &azureToken{ + token: adal.Token{ + AccessToken: accessToken, + RefreshToken: refreshToken, + ExpiresIn: json.Number(expiresIn), + ExpiresOn: json.Number(expiresOn), + NotBefore: json.Number(expiresOn), + Resource: tokenAudience, + Type: tokenType, + }, + environment: environment, + clientID: clientID, + tenantID: tenantID, + apiserverID: resourceID, + }, nil +} + +func (ts *azureTokenSource) storeTokenInCfg(token *azureToken) error { + newCfg := make(map[string]string) + newCfg[cfgAccessToken] = token.token.AccessToken + newCfg[cfgRefreshToken] = 
token.token.RefreshToken + newCfg[cfgEnvironment] = token.environment + newCfg[cfgClientID] = token.clientID + newCfg[cfgTenantID] = token.tenantID + newCfg[cfgApiserverID] = token.apiserverID + newCfg[cfgExpiresIn] = string(token.token.ExpiresIn) + newCfg[cfgExpiresOn] = string(token.token.ExpiresOn) + newCfg[cfgConfigMode] = strconv.Itoa(int(ts.configMode)) + + err := ts.persister.Persist(newCfg) + if err != nil { + return fmt.Errorf("persisting the configuration: %v", err) + } + ts.cfg = newCfg + return nil +} + +func (ts *azureTokenSource) Refresh(token *azureToken) (*azureToken, error) { + return ts.source.Refresh(token) +} + +// refresh outdated token with adal. +// adal.RefreshTokenError will be returned if error occur during refreshing. +func (ts *azureTokenSourceDeviceCode) Refresh(token *azureToken) (*azureToken, error) { + env, err := azure.EnvironmentFromName(token.environment) + if err != nil { + return nil, err + } + + var oauthConfig *adal.OAuthConfig + if ts.configMode == configModeOmitSPNPrefix { + oauthConfig, err = adal.NewOAuthConfigWithAPIVersion(env.ActiveDirectoryEndpoint, token.tenantID, nil) + if err != nil { + return nil, fmt.Errorf("building the OAuth configuration without api-version for token refresh: %v", err) + } + } else { + oauthConfig, err = adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, token.tenantID) + if err != nil { + return nil, fmt.Errorf("building the OAuth configuration for token refresh: %v", err) + } + } + + callback := func(t adal.Token) error { + return nil + } + spt, err := adal.NewServicePrincipalTokenFromManualToken( + *oauthConfig, + token.clientID, + token.apiserverID, + token.token, + callback) + if err != nil { + return nil, fmt.Errorf("creating new service principal for token refresh: %v", err) + } + + if err := spt.Refresh(); err != nil { + return nil, fmt.Errorf("refreshing token: %v", err) + } + + return &azureToken{ + token: spt.Token(), + environment: token.environment, + clientID: token.clientID, + tenantID: token.tenantID, + apiserverID: token.apiserverID, + }, nil +} + +type azureTokenSourceDeviceCode struct { + environment azure.Environment + clientID string + tenantID string + apiserverID string + configMode configMode +} + +func newAzureTokenSourceDeviceCode(environment azure.Environment, clientID string, tenantID string, apiserverID string, configMode configMode) (tokenSource, error) { + if clientID == "" { + return nil, errors.New("client-id is empty") + } + if tenantID == "" { + return nil, errors.New("tenant-id is empty") + } + if apiserverID == "" { + return nil, errors.New("apiserver-id is empty") + } + return &azureTokenSourceDeviceCode{ + environment: environment, + clientID: clientID, + tenantID: tenantID, + apiserverID: apiserverID, + configMode: configMode, + }, nil +} + +func (ts *azureTokenSourceDeviceCode) Token() (*azureToken, error) { + var ( + oauthConfig *adal.OAuthConfig + err error + ) + if ts.configMode == configModeOmitSPNPrefix { + oauthConfig, err = adal.NewOAuthConfigWithAPIVersion(ts.environment.ActiveDirectoryEndpoint, ts.tenantID, nil) + if err != nil { + return nil, fmt.Errorf("building the OAuth configuration without api-version for device code authentication: %v", err) + } + } else { + oauthConfig, err = adal.NewOAuthConfig(ts.environment.ActiveDirectoryEndpoint, ts.tenantID) + if err != nil { + return nil, fmt.Errorf("building the OAuth configuration for device code authentication: %v", err) + } + } + client := &autorest.Client{} + deviceCode, err := adal.InitiateDeviceAuth(client, 
*oauthConfig, ts.clientID, ts.apiserverID) + if err != nil { + return nil, fmt.Errorf("initialing the device code authentication: %v", err) + } + + _, err = fmt.Fprintln(os.Stderr, *deviceCode.Message) + if err != nil { + return nil, fmt.Errorf("prompting the device code message: %v", err) + } + + token, err := adal.WaitForUserCompletion(client, deviceCode) + if err != nil { + return nil, fmt.Errorf("waiting for device code authentication to complete: %v", err) + } + + return &azureToken{ + token: *token, + environment: ts.environment.Name, + clientID: ts.clientID, + tenantID: ts.tenantID, + apiserverID: ts.apiserverID, + }, nil +} diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/openstack/openstack_stub.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/openstack/openstack_stub.go new file mode 100644 index 0000000000..6e404beda2 --- /dev/null +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/openstack/openstack_stub.go @@ -0,0 +1,36 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package openstack + +import ( + "errors" + + "k8s.io/client-go/rest" + "k8s.io/klog/v2" +) + +func init() { + if err := rest.RegisterAuthProviderPlugin("openstack", newOpenstackAuthProvider); err != nil { + klog.Fatalf("Failed to register openstack auth plugin: %s", err) + } +} + +func newOpenstackAuthProvider(_ string, _ map[string]string, _ rest.AuthProviderConfigPersister) (rest.AuthProvider, error) { + return nil, errors.New(`The openstack auth plugin has been removed. +Please use the "client-keystone-auth" kubectl/client-go credential plugin instead. +See https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/using-client-keystone-auth.md for further details`) +} diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins.go new file mode 100644 index 0000000000..d1efc86cde --- /dev/null +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins.go @@ -0,0 +1,22 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package auth + +import ( + // Initialize common client auth plugins. 
+ _ "k8s.io/client-go/plugin/pkg/client/auth/oidc" +) diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins_providers.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins_providers.go new file mode 100644 index 0000000000..967039662c --- /dev/null +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins_providers.go @@ -0,0 +1,26 @@ +// +build !providerless + +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package auth + +import ( + // Initialize client auth plugins for cloud providers. + _ "k8s.io/client-go/plugin/pkg/client/auth/azure" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + _ "k8s.io/client-go/plugin/pkg/client/auth/openstack" +) diff --git a/vendor/k8s.io/klog/v2/go.mod b/vendor/k8s.io/klog/v2/go.mod index eb297b6a1e..31aefba74a 100644 --- a/vendor/k8s.io/klog/v2/go.mod +++ b/vendor/k8s.io/klog/v2/go.mod @@ -2,4 +2,4 @@ module k8s.io/klog/v2 go 1.13 -require github.com/go-logr/logr v0.4.0 +require github.com/go-logr/logr v1.2.0 diff --git a/vendor/k8s.io/klog/v2/go.sum b/vendor/k8s.io/klog/v2/go.sum index 5778f81742..919fbadbc0 100644 --- a/vendor/k8s.io/klog/v2/go.sum +++ b/vendor/k8s.io/klog/v2/go.sum @@ -1,2 +1,2 @@ -github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= -github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index 25483fad13..dacdf91120 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -284,6 +284,7 @@ func (m *moduleSpec) Get() interface{} { var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") +// Set will sets module value // Syntax: -vmodule=recordio=2,file=1,gfs*=3 func (m *moduleSpec) Set(value string) error { var filter []modulePat @@ -362,6 +363,7 @@ func (t *traceLocation) Get() interface{} { var errTraceSyntax = errors.New("syntax error: expect file.go:234") +// Set will sets backtrace value // Syntax: -log_backtrace_at=gopherflakes.go:234 // Note that unlike vmodule the file extension is included here. 
func (t *traceLocation) Set(value string) error { @@ -507,7 +509,7 @@ type loggingT struct { addDirHeader bool // If set, all output will be redirected unconditionally to the provided logr.Logger - logr logr.Logger + logr *logr.Logger // If true, messages will not be propagated to lower severity log levels oneOutput bool @@ -696,11 +698,11 @@ func (buf *buffer) someDigits(i, d int) int { return copy(buf.tmp[i:], buf.tmp[j:]) } -func (l *loggingT) println(s severity, logr logr.Logger, filter LogFilter, args ...interface{}) { +func (l *loggingT) println(s severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { buf, file, line := l.header(s, 0) - // if logr is set, we clear the generated header as we rely on the backing - // logr implementation to print headers - if logr != nil { + // if logger is set, we clear the generated header as we rely on the backing + // logger implementation to print headers + if logger != nil { l.putBuffer(buf) buf = l.getBuffer() } @@ -708,18 +710,18 @@ func (l *loggingT) println(s severity, logr logr.Logger, filter LogFilter, args args = filter.Filter(args) } fmt.Fprintln(buf, args...) - l.output(s, logr, buf, file, line, false) + l.output(s, logger, buf, 0 /* depth */, file, line, false) } -func (l *loggingT) print(s severity, logr logr.Logger, filter LogFilter, args ...interface{}) { - l.printDepth(s, logr, filter, 1, args...) +func (l *loggingT) print(s severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { + l.printDepth(s, logger, filter, 1, args...) } -func (l *loggingT) printDepth(s severity, logr logr.Logger, filter LogFilter, depth int, args ...interface{}) { +func (l *loggingT) printDepth(s severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) { buf, file, line := l.header(s, depth) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers - if logr != nil { + if logger != nil { l.putBuffer(buf) buf = l.getBuffer() } @@ -730,14 +732,14 @@ func (l *loggingT) printDepth(s severity, logr logr.Logger, filter LogFilter, de if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } - l.output(s, logr, buf, file, line, false) + l.output(s, logger, buf, depth, file, line, false) } -func (l *loggingT) printf(s severity, logr logr.Logger, filter LogFilter, format string, args ...interface{}) { +func (l *loggingT) printf(s severity, logger *logr.Logger, filter LogFilter, format string, args ...interface{}) { buf, file, line := l.header(s, 0) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers - if logr != nil { + if logger != nil { l.putBuffer(buf) buf = l.getBuffer() } @@ -748,17 +750,17 @@ func (l *loggingT) printf(s severity, logr logr.Logger, filter LogFilter, format if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } - l.output(s, logr, buf, file, line, false) + l.output(s, logger, buf, 0 /* depth */, file, line, false) } // printWithFileLine behaves like print but uses the provided file and line number. If // alsoLogToStderr is true, the log message always appears on standard error; it // will also appear in the log file unless --logtostderr is set. 
-func (l *loggingT) printWithFileLine(s severity, logr logr.Logger, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { +func (l *loggingT) printWithFileLine(s severity, logger *logr.Logger, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { buf := l.formatHeader(s, file, line) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers - if logr != nil { + if logger != nil { l.putBuffer(buf) buf = l.getBuffer() } @@ -769,28 +771,28 @@ func (l *loggingT) printWithFileLine(s severity, logr logr.Logger, filter LogFil if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } - l.output(s, logr, buf, file, line, alsoToStderr) + l.output(s, logger, buf, 2 /* depth */, file, line, alsoToStderr) } // if loggr is specified, will call loggr.Error, otherwise output with logging module. -func (l *loggingT) errorS(err error, loggr logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { +func (l *loggingT) errorS(err error, logger *logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) } - if loggr != nil { - loggr.Error(err, msg, keysAndValues...) + if logger != nil { + logger.WithCallDepth(depth+2).Error(err, msg, keysAndValues...) return } l.printS(err, errorLog, depth+1, msg, keysAndValues...) } // if loggr is specified, will call loggr.Info, otherwise output with logging module. -func (l *loggingT) infoS(loggr logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { +func (l *loggingT) infoS(logger *logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) } - if loggr != nil { - loggr.Info(msg, keysAndValues...) + if logger != nil { + logger.WithCallDepth(depth+2).Info(msg, keysAndValues...) return } l.printS(nil, infoLog, depth+1, msg, keysAndValues...) @@ -799,14 +801,19 @@ func (l *loggingT) infoS(loggr logr.Logger, filter LogFilter, depth int, msg str // printS is called from infoS and errorS if loggr is not specified. // set log severity by s func (l *loggingT) printS(err error, s severity, depth int, msg string, keysAndValues ...interface{}) { - b := &bytes.Buffer{} - b.WriteString(fmt.Sprintf("%q", msg)) + // Only create a new buffer if we don't have one cached. + b := l.getBuffer() + // The message is always quoted, even if it contains line breaks. + // If developers want multi-line output, they should use a small, fixed + // message and put the multi-line output into a value. + b.WriteString(strconv.Quote(msg)) if err != nil { - b.WriteByte(' ') - b.WriteString(fmt.Sprintf("err=%q", err.Error())) + kvListFormat(&b.Buffer, "err", err) } - kvListFormat(b, keysAndValues...) - l.printDepth(s, logging.logr, nil, depth+1, b) + kvListFormat(&b.Buffer, keysAndValues...) + l.printDepth(s, logging.logr, nil, depth+1, &b.Buffer) + // Make the buffer available for reuse. + l.putBuffer(b) } const missingValue = "(MISSING)" @@ -821,20 +828,109 @@ func kvListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { v = missingValue } b.WriteByte(' ') + // Keys are assumed to be well-formed according to + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments + // for the sake of performance. 
Keys with spaces, + // special characters, etc. will break parsing. + if k, ok := k.(string); ok { + // Avoid one allocation when the key is a string, which + // normally it should be. + b.WriteString(k) + } else { + b.WriteString(fmt.Sprintf("%s", k)) + } - switch v.(type) { - case string, error: - b.WriteString(fmt.Sprintf("%s=%q", k, v)) + // The type checks are sorted so that more frequently used ones + // come first because that is then faster in the common + // cases. In Kubernetes, ObjectRef (a Stringer) is more common + // than plain strings + // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235). + switch v := v.(type) { + case fmt.Stringer: + writeStringValue(b, true, stringerToString(v)) + case string: + writeStringValue(b, true, v) + case error: + writeStringValue(b, true, v.Error()) + case []byte: + // In https://github.com/kubernetes/klog/pull/237 it was decided + // to format byte slices with "%+q". The advantages of that are: + // - readable output if the bytes happen to be printable + // - non-printable bytes get represented as unicode escape + // sequences (\uxxxx) + // + // The downsides are that we cannot use the faster + // strconv.Quote here and that multi-line output is not + // supported. If developers know that a byte array is + // printable and they want multi-line output, they can + // convert the value to string before logging it. + b.WriteByte('=') + b.WriteString(fmt.Sprintf("%+q", v)) default: - if _, ok := v.(fmt.Stringer); ok { - b.WriteString(fmt.Sprintf("%s=%q", k, v)) - } else { - b.WriteString(fmt.Sprintf("%s=%+v", k, v)) - } + writeStringValue(b, false, fmt.Sprintf("%+v", v)) } } } +func stringerToString(s fmt.Stringer) (ret string) { + defer func() { + if err := recover(); err != nil { + ret = "nil" + } + }() + ret = s.String() + return +} + +func writeStringValue(b *bytes.Buffer, quote bool, v string) { + data := []byte(v) + index := bytes.IndexByte(data, '\n') + if index == -1 { + b.WriteByte('=') + if quote { + // Simple string, quote quotation marks and non-printable characters. + b.WriteString(strconv.Quote(v)) + return + } + // Non-string with no line breaks. + b.WriteString(v) + return + } + + // Complex multi-line string, show as-is with indention like this: + // I... "hello world" key=< + // line 1 + // line 2 + // > + // + // Tabs indent the lines of the value while the end of string delimiter + // is indented with a space. That has two purposes: + // - visual difference between the two for a human reader because indention + // will be different + // - no ambiguity when some value line starts with the end delimiter + // + // One downside is that the output cannot distinguish between strings that + // end with a line break and those that don't because the end delimiter + // will always be on the next line. + b.WriteString("=<\n") + for index != -1 { + b.WriteByte('\t') + b.Write(data[0 : index+1]) + data = data[index+1:] + index = bytes.IndexByte(data, '\n') + } + if len(data) == 0 { + // String ended with line break, don't add another. + b.WriteString(" >") + } else { + // No line break at end of last line, write rest of string and + // add one. + b.WriteByte('\t') + b.Write(data) + b.WriteString("\n >") + } +} + // redirectBuffer is used to set an alternate destination for the logs type redirectBuffer struct { w io.Writer @@ -855,13 +951,26 @@ func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { // SetLogger will set the backing logr implementation for klog. 
// If set, all log lines will be suppressed from the regular Output, and // redirected to the logr implementation. -// All log lines include the 'severity', 'file' and 'line' values attached as -// structured logging values. // Use as: // ... // klog.SetLogger(zapr.NewLogger(zapLog)) +// +// To remove a backing logr implemention, use ClearLogger. Setting an +// empty logger with SetLogger(logr.Logger{}) does not work. func SetLogger(logr logr.Logger) { - logging.logr = logr + logging.mu.Lock() + defer logging.mu.Unlock() + + logging.logr = &logr +} + +// ClearLogger removes a backing logr implementation if one was set earlier +// with SetLogger. +func ClearLogger() { + logging.mu.Lock() + defer logging.mu.Unlock() + + logging.logr = nil } // SetOutput sets the output destination for all severities @@ -899,8 +1008,16 @@ func LogToStderr(stderr bool) { } // output writes the data to the log files and releases the buffer. -func (l *loggingT) output(s severity, log logr.Logger, buf *buffer, file string, line int, alsoToStderr bool) { +func (l *loggingT) output(s severity, log *logr.Logger, buf *buffer, depth int, file string, line int, alsoToStderr bool) { + var isLocked = true l.mu.Lock() + defer func() { + if isLocked { + // Unlock before returning in case that it wasn't done already. + l.mu.Unlock() + } + }() + if l.traceLocation.isSet() { if l.traceLocation.match(file, line) { buf.Write(stacks(false)) @@ -911,9 +1028,9 @@ func (l *loggingT) output(s severity, log logr.Logger, buf *buffer, file string, // TODO: set 'severity' and caller information as structured log info // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} if s == errorLog { - l.logr.Error(nil, string(data)) + l.logr.WithCallDepth(depth+3).Error(nil, string(data)) } else { - log.Info(string(data)) + log.WithCallDepth(depth + 3).Info(string(data)) } } else if l.toStderr { os.Stderr.Write(data) @@ -963,6 +1080,7 @@ func (l *loggingT) output(s severity, log logr.Logger, buf *buffer, file string, // If we got here via Exit rather than Fatal, print no stacks. if atomic.LoadUint32(&fatalNoStacks) > 0 { l.mu.Unlock() + isLocked = false timeoutFlush(10 * time.Second) os.Exit(1) } @@ -980,11 +1098,12 @@ func (l *loggingT) output(s severity, log logr.Logger, buf *buffer, file string, } } l.mu.Unlock() + isLocked = false timeoutFlush(10 * time.Second) - os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. + os.Exit(255) // C++ uses -1, which is silly because it's anded(&) with 255 anyway. } l.putBuffer(buf) - l.mu.Unlock() + if stats := severityStats[s]; stats != nil { atomic.AddInt64(&stats.lines, 1) atomic.AddInt64(&stats.bytes, int64(len(data))) @@ -1264,7 +1383,7 @@ func (l *loggingT) setV(pc uintptr) Level { // See the documentation of V for more information. type Verbose struct { enabled bool - logr logr.Logger + logr *logr.Logger filter LogFilter } @@ -1272,7 +1391,8 @@ func newVerbose(level Level, b bool) Verbose { if logging.logr == nil { return Verbose{b, nil, logging.filter} } - return Verbose{b, logging.logr.V(int(level)), logging.filter} + v := logging.logr.V(int(level)) + return Verbose{b, &v, logging.filter} } // V reports whether verbosity at the call site is at least the requested level. 
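A minimal sketch of the SetLogger/ClearLogger pairing described above, assuming the go.uber.org/zap and github.com/go-logr/zapr modules are available (they are not part of this patch):

package main

import (
	"github.com/go-logr/zapr"
	"go.uber.org/zap"
	"k8s.io/klog/v2"
)

func main() {
	zapLog, err := zap.NewProduction()
	if err != nil {
		panic(err)
	}
	klog.SetLogger(zapr.NewLogger(zapLog))
	defer klog.ClearLogger() // detach the logr backend again

	klog.InfoS("routed through the logr backend", "component", "example")
}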
@@ -1310,9 +1430,14 @@ func V(level Level) Verbose { if runtime.Callers(2, logging.pcs[:]) == 0 { return newVerbose(level, false) } - v, ok := logging.vmap[logging.pcs[0]] + // runtime.Callers returns "return PCs", but we want + // to look up the symbolic information for the call, + // so subtract 1 from the PC. runtime.CallersFrames + // would be cleaner, but allocates. + pc := logging.pcs[0] - 1 + v, ok := logging.vmap[pc] if !ok { - v = logging.setV(logging.pcs[0]) + v = logging.setV(pc) } return newVerbose(level, v >= level) } @@ -1364,6 +1489,14 @@ func InfoSDepth(depth int, msg string, keysAndValues ...interface{}) { logging.infoS(logging.logr, logging.filter, depth, msg, keysAndValues...) } +// InfoSDepth is equivalent to the global InfoSDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfoSDepth(depth int, msg string, keysAndValues ...interface{}) { + if v.enabled { + logging.infoS(v.logr, v.filter, depth, msg, keysAndValues...) + } +} + // Deprecated: Use ErrorS instead. func (v Verbose) Error(err error, msg string, args ...interface{}) { if v.enabled { @@ -1568,6 +1701,15 @@ func (ref ObjectRef) String() string { return ref.Name } +// MarshalLog ensures that loggers with support for structured output will log +// as a struct by removing the String method via a custom type. +func (ref ObjectRef) MarshalLog() interface{} { + type or ObjectRef + return or(ref) +} + +var _ logr.Marshaler = ObjectRef{} + // KMetadata is a subset of the kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface // this interface may expand in the future, but will always be a subset of the // kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface @@ -1598,3 +1740,20 @@ func KRef(namespace, name string) ObjectRef { Namespace: namespace, } } + +// KObjs returns slice of ObjectRef from an slice of ObjectMeta +func KObjs(arg interface{}) []ObjectRef { + s := reflect.ValueOf(arg) + if s.Kind() != reflect.Slice { + return nil + } + objectRefs := make([]ObjectRef, 0, s.Len()) + for i := 0; i < s.Len(); i++ { + if v, ok := s.Index(i).Interface().(KMetadata); ok { + objectRefs = append(objectRefs, KObj(v)) + } else { + return nil + } + } + return objectRefs +} diff --git a/vendor/k8s.io/klog/v2/klog_file.go b/vendor/k8s.io/klog/v2/klog_file.go index de830d9221..1025d644f3 100644 --- a/vendor/k8s.io/klog/v2/klog_file.go +++ b/vendor/k8s.io/klog/v2/klog_file.go @@ -22,9 +22,7 @@ import ( "errors" "fmt" "os" - "os/user" "path/filepath" - "runtime" "strings" "sync" "time" @@ -57,38 +55,6 @@ func init() { } } -func getUserName() string { - userNameOnce.Do(func() { - // On Windows, the Go 'user' package requires netapi32.dll. - // This affects Windows Nano Server: - // https://github.com/golang/go/issues/21867 - // Fallback to using environment variables. - if runtime.GOOS == "windows" { - u := os.Getenv("USERNAME") - if len(u) == 0 { - return - } - // Sanitize the USERNAME since it may contain filepath separators. - u = strings.Replace(u, `\`, "_", -1) - - // user.Current().Username normally produces something like 'USERDOMAIN\USERNAME' - d := os.Getenv("USERDOMAIN") - if len(d) != 0 { - userName = d + "_" + u - } else { - userName = u - } - } else { - current, err := user.Current() - if err == nil { - userName = current.Username - } - } - }) - - return userName -} - // shortHostname returns its argument, truncating at the first period. // For instance, given "www.google.com" it returns "www". 
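A short sketch of the ObjectRef helpers added above, assuming a Kubernetes object whose metadata satisfies the KMetadata subset; metav1.ObjectMeta and the object name are used purely for illustration:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func main() {
	job := &metav1.ObjectMeta{Name: "kn-event-sender", Namespace: "default"}

	// The text output renders this as job="default/kn-event-sender" (ObjectRef
	// is a fmt.Stringer); logr backends that understand logr.Marshaler receive
	// a structured value via MarshalLog instead.
	klog.InfoS("sender job created", "job", klog.KObj(job))
}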
func shortHostname(hostname string) string { diff --git a/vendor/k8s.io/klog/v2/klog_file_others.go b/vendor/k8s.io/klog/v2/klog_file_others.go new file mode 100644 index 0000000000..aa46726851 --- /dev/null +++ b/vendor/k8s.io/klog/v2/klog_file_others.go @@ -0,0 +1,19 @@ +//go:build !windows +// +build !windows + +package klog + +import ( + "os/user" +) + +func getUserName() string { + userNameOnce.Do(func() { + current, err := user.Current() + if err == nil { + userName = current.Username + } + }) + + return userName +} diff --git a/vendor/k8s.io/klog/v2/klog_file_windows.go b/vendor/k8s.io/klog/v2/klog_file_windows.go new file mode 100644 index 0000000000..2517f9c538 --- /dev/null +++ b/vendor/k8s.io/klog/v2/klog_file_windows.go @@ -0,0 +1,34 @@ +//go:build windows +// +build windows + +package klog + +import ( + "os" + "strings" +) + +func getUserName() string { + userNameOnce.Do(func() { + // On Windows, the Go 'user' package requires netapi32.dll. + // This affects Windows Nano Server: + // https://github.com/golang/go/issues/21867 + // Fallback to using environment variables. + u := os.Getenv("USERNAME") + if len(u) == 0 { + return + } + // Sanitize the USERNAME since it may contain filepath separators. + u = strings.Replace(u, `\`, "_", -1) + + // user.Current().Username normally produces something like 'USERDOMAIN\USERNAME' + d := os.Getenv("USERDOMAIN") + if len(d) != 0 { + userName = d + "_" + u + } else { + userName = u + } + }) + + return userName +} diff --git a/vendor/knative.dev/hack/library.sh b/vendor/knative.dev/hack/library.sh index 0877aace45..4af73bec50 100644 --- a/vendor/knative.dev/hack/library.sh +++ b/vendor/knative.dev/hack/library.sh @@ -64,6 +64,31 @@ fi # On a Prow job, redirect stderr to stdout so it's synchronously added to log (( IS_PROW )) && exec 2>&1 +# Return the major version of a release. +# For example, "v0.2.1" returns "0" +# Parameters: $1 - release version label. +function major_version() { + local release="${1//v/}" + local tokens=(${release//\./ }) + echo "${tokens[0]}" +} + +# Return the minor version of a release. +# For example, "v0.2.1" returns "2" +# Parameters: $1 - release version label. +function minor_version() { + local tokens=(${1//\./ }) + echo "${tokens[1]}" +} + +# Return the release build number of a release. +# For example, "v0.2.1" returns "1". +# Parameters: $1 - release version label. +function patch_version() { + local tokens=(${1//\./ }) + echo "${tokens[2]}" +} + # Print error message and exit 1 # Parameters: $1..$n - error message to be displayed function abort() { @@ -746,6 +771,7 @@ function current_branch() { # Get the branch name from Prow's env var, see https://github.com/kubernetes/test-infra/blob/master/prow/jobs.md. # Otherwise, try getting the current branch from git. (( IS_PROW )) && branch_name="${PULL_BASE_REF:-}" + [[ -z "${branch_name}" ]] && branch_name="${GITHUB_BASE_REF:-}" [[ -z "${branch_name}" ]] && branch_name="$(git rev-parse --abbrev-ref HEAD)" echo "${branch_name}" } @@ -810,33 +836,56 @@ function shellcheck_new_files() { fi } +# Note: if using Github checkout action please ensure you fetch all tags prior to calling +# this function +# +# ie. +# - uses: actions/checkout@v2 +# with: +# fetch-depth: 0 +# +# See: https://github.com/actions/checkout#fetch-all-history-for-all-tags-and-branches function latest_version() { - # This function works "best effort" and works on Prow but not necessarily locally. - # The problem is finding the latest release. 
If a release occurs on the same commit which - # was branched from main, then the tag will be an ancestor to any commit derived from main. - # That was the original logic. Additionally in a release branch, the tag is always an ancestor. - # However, if the release commit ends up not the first commit from main, then the tag is not - # an ancestor of main, so we can't use `git describe` to find the most recent versioned tag. So - # we just sort all the tags and find the newest versioned one. - # But when running locally, we cannot(?) know if the current branch is a fork of main or a fork - # of a release branch. That's where this function will malfunction when the last release did not - # occur on the first commit -- it will try to run the upgrade tests from an older version instead - # of the most recent release. - # Workarounds include: - # Tag the first commit of the release branch. Say release-0.75 released v0.75.0 from the second commit - # Then tag the first commit in common between main and release-0.75 with `v0.75`. - # Always name your local fork master or main. - if [ $(current_branch) = "master" ] || [ $(current_branch) = "main" ]; then - # For main branch, simply use git tag without major version, this will work even - # if the release tag is not in the main - git tag -l "v[0-9]*" | sort -r --version-sort | head -n1 + local branch_name="$(current_branch)" + + # Use the latest release for main + if [[ "$branch_name" == "main" ]] || [[ "$branch_name" == "master" ]]; then + git tag -l "*$(git tag -l "*v[0-9]*" | cut -d '-' -f2 | sort -r --version-sort | head -n1)*" + return + fi + + # Ideally we shouldn't need to treat release branches differently but + # there are scenarios where git describe will return newer tags than + # the ones on the current branch + # + # ie. create a PR pulling commits from 0.24 into a release-0.23 branch + if [[ "$branch_name" == "release-"* ]]; then + # Infer major, minor version from the branch name + local tag="${branch_name##release-}" else - local semver=$(git describe --match "v[0-9]*" --abbrev=0) - local major_minor=$(echo "$semver" | cut -d. -f1-2) + # Nearest tag with the `knative-` prefix + local tag=$(git describe --abbrev=0 --match "knative-v[0-9]*") - # Get the latest patch release for the major minor - git tag -l "${major_minor}*" | sort -r --version-sort | head -n1 + # Fallback to older tag scheme vX.Y.Z + [[ -z "${tag}" ]] && tag=$(git describe --abbrev=0 --match "v[0-9]*") + + # Drop the prefix + tag="${tag##knative-}" fi + + local major_version="$(major_version ${tag})" + local minor_version="$(minor_version ${tag})" + + # Hardcode the jump back from 1.0 + if [ "$major_version" = "1" ] && [ "$minor_version" = "0" ]; then + local tag_filter='v0.26*' + else + # Adjust the minor down by one + local tag_filter="*v$major_version.$(( minor_version - 1 ))*" + fi + + # Get the latest patch release for the major minor + git tag -l "${tag_filter}" | sort -r --version-sort | head -n1 } # Initializations that depend on previous functions. diff --git a/vendor/knative.dev/hack/release.sh b/vendor/knative.dev/hack/release.sh index 0f7c473245..5e11e87fa3 100644 --- a/vendor/knative.dev/hack/release.sh +++ b/vendor/knative.dev/hack/release.sh @@ -131,22 +131,6 @@ function master_version() { echo "${tokens[0]}.${tokens[1]}" } -# Return the minor version of a release. -# For example, "v0.2.1" returns "2" -# Parameters: $1 - release version label. 
-function minor_version() { - local tokens=(${1//\./ }) - echo "${tokens[1]}" -} - -# Return the release build number of a release. -# For example, "v0.2.1" returns "1". -# Parameters: $1 - release version label. -function patch_version() { - local tokens=(${1//\./ }) - echo "${tokens[2]}" -} - # Return the short commit SHA from a release tag. # For example, "v20010101-deadbeef" returns "deadbeef". function hash_from_tag() { @@ -622,8 +606,6 @@ function publish_to_github() { if [[ -n "${RELEASE_NOTES}" ]]; then cat "${RELEASE_NOTES}" >> "${description}" fi - git tag -a "${github_tag}" -m "${title}" - git_push tag "${github_tag}" # Include a tag for the go module version # @@ -637,8 +619,14 @@ function publish_to_github() { local go_module_version="v0.$(( release_minor + 27 )).$(patch_version $TAG)" git tag -a "${go_module_version}" -m "${title}" git_push tag "${go_module_version}" + else + # Pre-1.0 - use the tag as the release tag + github_tag="${TAG}" fi + git tag -a "${github_tag}" -m "${title}" + git_push tag "${github_tag}" + [[ -n "${RELEASE_BRANCH}" ]] && commitish="--commitish=${RELEASE_BRANCH}" for i in {2..0}; do hub_tool release create \ diff --git a/vendor/knative.dev/kn-plugin-event/LICENSE b/vendor/knative.dev/kn-plugin-event/LICENSE new file mode 100644 index 0000000000..059b110a18 --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020 Knative CLI Event Contributors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/knative.dev/kn-plugin-event/internal/cli/cmd/build.go b/vendor/knative.dev/kn-plugin-event/internal/cli/cmd/build.go new file mode 100644 index 0000000000..4857e40bc7 --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/internal/cli/cmd/build.go @@ -0,0 +1,51 @@ +package cmd + +import ( + "errors" + "fmt" + + "github.com/spf13/cobra" + "knative.dev/kn-plugin-event/pkg/cli" + "knative.dev/kn-plugin-event/pkg/configuration" +) + +// ErrCantBePresented is returned if data can't be presented. 
+var ErrCantBePresented = errors.New("can't be presented") + +type buildCommand struct { + *Cmd + event *cli.EventArgs +} + +func (b *buildCommand) command() *cobra.Command { + c := &cobra.Command{ + Use: "build", + Short: "Builds a CloudEvent and print it to stdout", + RunE: b.run, + } + addBuilderFlags(b.event, c) + return c +} + +func (b *buildCommand) run(cmd *cobra.Command, _ []string) error { + b.options.OutWriter = cmd.OutOrStdout() + b.options.ErrWriter = cmd.ErrOrStderr() + c := configuration.CreateCli() + ce, err := c.CreateWithArgs(b.event) + if err != nil { + return cantBuildEventError(err) + } + out, err := c.PresentWith(ce, b.options.Output) + if err != nil { + return fmt.Errorf("event %w: %v", ErrCantBePresented, err) + } + cmd.Println(out) + return nil +} + +func cantBuildEventError(err error) error { + if errors.Is(err, cli.ErrCantBuildEvent) { + return err + } + return fmt.Errorf("%w: %v", cli.ErrCantBuildEvent, err) +} diff --git a/vendor/knative.dev/kn-plugin-event/internal/cli/cmd/builder.go b/vendor/knative.dev/kn-plugin-event/internal/cli/cmd/builder.go new file mode 100644 index 0000000000..5fd51630f5 --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/internal/cli/cmd/builder.go @@ -0,0 +1,36 @@ +package cmd + +import ( + "github.com/spf13/cobra" + "knative.dev/kn-plugin-event/pkg/cli" + "knative.dev/kn-plugin-event/pkg/event" +) + +func addBuilderFlags(eventArgs *cli.EventArgs, c *cobra.Command) { + fs := c.Flags() + fs.StringVarP( + &eventArgs.Type, "type", "t", event.DefaultType, + "Specify a type of a CloudEvent", + ) + fs.StringVarP( + &eventArgs.ID, "id", "i", event.NewID(), + "Specify a CloudEvent ID", + ) + fs.StringVarP( + &eventArgs.Source, "source", "s", event.DefaultSource(), + "Specify a source of an CloudEvent", + ) + fs.StringArrayVarP( + &eventArgs.Fields, "field", "f", make([]string, 0), + `Specify a field for data of an CloudEvent. Field should be specified as +jsonpath expression followed by equal sign and then a value. Value will be +resolved to be used in exact type. Example: +"person.age=18".`, + ) + fs.StringArrayVar( + &eventArgs.RawFields, "raw-field", make([]string, 0), + `Specify a raw field for data of an CloudEvent. Raw field should be +specified as jsonpath expression followed by equal sign and then a value. The +value will be used as string. Example: "person.name=John".`, + ) +} diff --git a/vendor/knative.dev/kn-plugin-event/internal/cli/cmd/root.go b/vendor/knative.dev/kn-plugin-event/internal/cli/cmd/root.go new file mode 100644 index 0000000000..947f31920e --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/internal/cli/cmd/root.go @@ -0,0 +1,124 @@ +package cmd + +import ( + "fmt" + "io" + "os" + + "github.com/spf13/cobra" + "github.com/thediveo/enumflag" + + // for kubeconfig auth plugins to work correctly see issue #24 . + _ "k8s.io/client-go/plugin/pkg/client/auth" + "knative.dev/kn-plugin-event/pkg/cli" + "knative.dev/kn-plugin-event/pkg/cli/retcode" + "knative.dev/kn-plugin-event/pkg/metadata" +) + +// Cmd represents a command line application entrypoint. +type Cmd struct { + options *cli.Options + root *cobra.Command + exit func(code int) +} + +// Execute will execute the application. +func (c *Cmd) Execute() { + if err := c.execute(); err != nil { + c.exit(retcode.Calc(err)) + } +} + +// ExecuteWithOptions will execute the application with the provided options. +func (c *Cmd) ExecuteWithOptions(options ...CommandOption) error { + return c.execute(options...) +} + +// WithArgs creates an option which sets args. 
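A brief usage sketch of the Cmd options API defined in this file, assuming it is called from within the kn-plugin-event module (the cmd package is internal); the --field value mirrors the example given in the builder flags:

package main

import (
	"bytes"
	"errors"
	"fmt"

	"knative.dev/kn-plugin-event/internal/cli/cmd"
	"knative.dev/kn-plugin-event/pkg/cli"
)

func main() {
	var buf bytes.Buffer
	c := &cmd.Cmd{}
	err := c.ExecuteWithOptions(
		cmd.WithArgs("build", "--output", "json", "--field", "person.age=18"),
		cmd.WithOutput(&buf),
	)
	if err != nil {
		// Failures are wrapped with %w, so callers can branch on the exported
		// sentinels such as cli.ErrCantBuildEvent.
		if errors.Is(err, cli.ErrCantBuildEvent) {
			fmt.Println("could not build the event:", err)
		}
		return
	}
	fmt.Print(buf.String())
}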
+func WithArgs(args ...string) CommandOption { + return func(command *cobra.Command) { + command.SetArgs(args) + } +} + +// WithOutput creates an option witch sets os.Stdout and os.Stderr. +func WithOutput(out io.Writer) CommandOption { + return func(command *cobra.Command) { + command.SetOut(out) + command.SetErr(out) + } +} + +// CommandOption is used to configure a command in Cmd.ExecuteWithOptions. +type CommandOption func(*cobra.Command) + +func (c *Cmd) execute(configs ...CommandOption) error { + c.init() + for _, config := range configs { + config(c.root) + } + // cobra.Command should pass our own errors, no need to wrap them. + return c.root.Execute() //nolint:wrapcheck +} + +func (c *Cmd) init() { + if c.root != nil { + return + } + c.exit = os.Exit + c.options = &cli.Options{} + c.root = &cobra.Command{ + Use: metadata.PluginUse, + Aliases: []string{fmt.Sprintf("kn %s", metadata.PluginUse)}, + Short: metadata.PluginDescription, + Long: metadata.PluginLongDescription, + } + c.root.SetOut(os.Stdout) + c.root.SetErr(os.Stderr) + c.root.PersistentFlags().BoolVarP( + &c.options.Verbose, "verbose", "v", + false, "verbose output", + ) + c.root.PersistentFlags().VarP( + enumflag.New(&c.options.Output, "output", outputModeIds(), enumflag.EnumCaseInsensitive), + "output", "o", + "Output format. One of: human|json|yaml.", + ) + + eventArgs := &cli.EventArgs{} + targetArgs := &cli.TargetArgs{} + commands := []subcommand{ + &buildCommand{Cmd: c, event: eventArgs}, + &sendCommand{Cmd: c, event: eventArgs, target: targetArgs}, + &versionCommand{Cmd: c}, + } + for _, each := range commands { + c.root.AddCommand(each.command()) + } + + c.root.PersistentFlags().StringVar( + &c.options.KubeconfigOptions.Path, "kubeconfig", "", + "kubectl configuration file (default: ~/.kube/config)", + ) + c.root.PersistentFlags().StringVar( + &c.options.KubeconfigOptions.Context, "context", "", + "name of the kubeconfig context to use", + ) + c.root.PersistentFlags().StringVar( + &c.options.KubeconfigOptions.Cluster, "cluster", "", + "name of the kubeconfig cluster to use", + ) + + c.root.PersistentPreRun = func(cmd *cobra.Command, args []string) { + c.options.OutWriter = cmd.OutOrStdout() + c.options.ErrWriter = cmd.ErrOrStderr() + } +} + +func outputModeIds() map[cli.OutputMode][]string { + return map[cli.OutputMode][]string{ + cli.HumanReadable: {"human"}, + cli.JSON: {"json"}, + cli.YAML: {"yaml"}, + } +} diff --git a/vendor/knative.dev/kn-plugin-event/internal/cli/cmd/send.go b/vendor/knative.dev/kn-plugin-event/internal/cli/cmd/send.go new file mode 100644 index 0000000000..acaf762836 --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/internal/cli/cmd/send.go @@ -0,0 +1,95 @@ +package cmd + +import ( + "errors" + "fmt" + + "github.com/spf13/cobra" + "knative.dev/kn-plugin-event/pkg/cli" + "knative.dev/kn-plugin-event/pkg/configuration" + "knative.dev/kn-plugin-event/pkg/event" +) + +var ( + // ErrSendTargetValidationFailed is returned if a send target can't pass a + // validation. + ErrSendTargetValidationFailed = errors.New("send target validation failed") + + // ErrCantSendEvent is returned if event can't be sent. 
+ ErrCantSendEvent = errors.New("can't send event") +) + +type sendCommand struct { + target *cli.TargetArgs + event *cli.EventArgs + *Cmd +} + +func (s *sendCommand) command() *cobra.Command { + c := &cobra.Command{ + Use: "send", + Short: "Builds and sends a CloudEvent to recipient", + RunE: s.run, + } + addBuilderFlags(s.event, c) + c.Flags().StringVarP( + &s.target.URL, "to-url", "u", "", + `Specify an URL to send event to. This option can't be used with +--to option.`, + ) + c.Flags().StringVarP( + &s.target.Addressable, "to", "r", "", + `Specify an addressable resource to send event to. This argument +takes format kind:apiVersion:name for named resources or +kind:apiVersion:labelKey1=value1,labelKey2=value2 for matching via a +label selector. This option can't be used with --to-url option.`, + ) + c.Flags().StringVarP( + &s.target.Namespace, "namespace", "n", "", + `Specify a namespace of addressable resource defined with --to +option. If this option isn't specified a current context namespace will be used +to find addressable resource. This option can't be used with --to-url option.`, + ) + c.Flags().StringVar( + &s.target.SenderNamespace, "sender-namespace", "", + `Specify a namespace of sender job to be created. While using --to +option, event is send within a cluster. To do that kn-event uses a special Job +that is deployed to cluster in namespace dictated by --sender-namespace. If +this option isn't specified a current context namespace will be used. This +option can't be used with --to-url option.`, + ) + c.Flags().StringVar( + &s.target.AddressableURI, "addressable-uri", "", + `Specify an URI of a target addressable resource. If this option +isn't specified target URL will not be changed. This option can't be used with +--to-url option.`, + ) + c.PreRunE = func(cmd *cobra.Command, args []string) error { + err := cli.ValidateTarget(s.target) + if err != nil { + return fmt.Errorf("%w: %v", ErrSendTargetValidationFailed, err) + } + return nil + } + return c +} + +func (s *sendCommand) run(_ *cobra.Command, _ []string) error { + c := configuration.CreateCli() + ce, err := c.CreateWithArgs(s.event) + if err != nil { + return cantBuildEventError(err) + } + err = c.Send(*ce, *s.target, s.options) + if err != nil { + return cantSentEvent(err) + } + return nil +} + +func cantSentEvent(err error) error { + if errors.Is(err, event.ErrCantSentEvent) { + return err + } + return fmt.Errorf("%w: %v", event.ErrCantSentEvent, err) +} diff --git a/vendor/knative.dev/kn-plugin-event/internal/cli/cmd/testing.go b/vendor/knative.dev/kn-plugin-event/internal/cli/cmd/testing.go new file mode 100644 index 0000000000..cc64400988 --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/internal/cli/cmd/testing.go @@ -0,0 +1,46 @@ +package cmd + +import "io" + +// TestingCmd a wrapper for Cmd to ease of testing. +type TestingCmd struct { + *Cmd +} + +// ExecuteOrFail the command or fail on error. +func (c *TestingCmd) ExecuteOrFail() { + c.init() + c.Cmd.Execute() +} + +// Execute the command and return error if any. +func (c *TestingCmd) Execute() error { + c.init() + return c.execute() +} + +// Exit sets the exit command that accepts retcode. +func (c *TestingCmd) Exit(fn func(code int)) { + c.init() + c.exit = fn +} + +// Out sets output stream to cmd. +func (c *TestingCmd) Out(newOut io.Writer) { + c.init() + c.root.SetOut(newOut) + c.root.SetErr(newOut) +} + +// Args set to main command to be executed. 
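A sketch of how the TestingCmd wrapper above can be exercised from a test placed alongside the package; the asserted behaviour (non-empty version output) is an assumption, not taken from the plugin's own tests:

package cmd_test

import (
	"bytes"
	"testing"

	"knative.dev/kn-plugin-event/internal/cli/cmd"
)

func TestVersionOutput(t *testing.T) {
	var buf bytes.Buffer
	tc := &cmd.TestingCmd{}
	tc.Out(&buf)
	tc.Args("version", "--output", "yaml")
	if err := tc.Execute(); err != nil {
		t.Fatal(err)
	}
	if buf.Len() == 0 {
		t.Error("expected version output")
	}
}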
+func (c *TestingCmd) Args(args ...string) { + c.init() + c.root.SetArgs(args) +} + +func (c *TestingCmd) init() { + if c.Cmd == nil { + c.Cmd = &Cmd{} + } + c.Cmd.init() +} diff --git a/vendor/knative.dev/kn-plugin-event/internal/cli/cmd/types.go b/vendor/knative.dev/kn-plugin-event/internal/cli/cmd/types.go new file mode 100644 index 0000000000..01048b77e1 --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/internal/cli/cmd/types.go @@ -0,0 +1,7 @@ +package cmd + +import "github.com/spf13/cobra" + +type subcommand interface { + command() *cobra.Command +} diff --git a/vendor/knative.dev/kn-plugin-event/internal/cli/cmd/version.go b/vendor/knative.dev/kn-plugin-event/internal/cli/cmd/version.go new file mode 100644 index 0000000000..224ee9a18f --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/internal/cli/cmd/version.go @@ -0,0 +1,64 @@ +package cmd + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/spf13/cobra" + "gopkg.in/yaml.v2" + "knative.dev/kn-plugin-event/pkg/cli" + "knative.dev/kn-plugin-event/pkg/metadata" +) + +// ErrUnsupportedOutputMode is returned if user passed a unsupported +// output mode. +var ErrUnsupportedOutputMode = errors.New("unsupported mode") + +type versionCommand struct { + *Cmd +} + +func (v *versionCommand) command() *cobra.Command { + return &cobra.Command{ + Use: "version", + Short: "Prints the kn event plugin version", + RunE: v.run, + } +} + +func (v *versionCommand) run(cmd *cobra.Command, _ []string) error { + output, err := presentAs(cli.PluginVersionOutput{ + Name: metadata.PluginName, + Version: metadata.Version, + Image: metadata.ResolveImage(), + }, v.options.Output) + if err != nil { + return err + } + cmd.Println(output) + return nil +} + +func presentAs(pv cli.PluginVersionOutput, mode cli.OutputMode) (string, error) { + switch mode { + case cli.JSON: + return marshalWith(pv, json.Marshal) + case cli.YAML: + return marshalWith(pv, yaml.Marshal) + case cli.HumanReadable: + return fmt.Sprintf("%s version: %s\nsender image: %s", + pv.Name, pv.Version, pv.Image), nil + } + return "", fmt.Errorf("%w: %v", ErrUnsupportedOutputMode, mode) +} + +type marshalFunc func(in interface{}) (out []byte, err error) + +func marshalWith(pv cli.PluginVersionOutput, marchaller marshalFunc) (string, error) { + bytes, err := marchaller(pv) + if err != nil { + return "", fmt.Errorf("version %w: %v", ErrCantBePresented, err) + } + return string(bytes), err +} diff --git a/vendor/knative.dev/kn-plugin-event/pkg/cli/create.go b/vendor/knative.dev/kn-plugin-event/pkg/cli/create.go new file mode 100644 index 0000000000..bc5d1d72fe --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/pkg/cli/create.go @@ -0,0 +1,169 @@ +package cli + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + "strings" + "time" + + cloudevents "github.com/cloudevents/sdk-go/v2" + "github.com/ghodss/yaml" + "knative.dev/kn-plugin-event/pkg/event" +) + +const ( + fieldAssigmentSize = 2 + decimalBase = 10 + precision64BitSize = 64 +) + +var ( + // ErrUnsupportedOutputMode if user passed unsupported output mode. + ErrUnsupportedOutputMode = errors.New("unsupported output mode") + + // ErrInvalidFormat if user pass an un-parsable format. + ErrInvalidFormat = errors.New("invalid format") + + // ErrCantBuildEvent if event can't be built. + ErrCantBuildEvent = errors.New("can't build event") + + // ErrCantMarshalEvent if event can't be marshalled to text. 
+ ErrCantMarshalEvent = errors.New("can't marshal event") +) + +// CreateWithArgs will create an event by parsing given args. +func (c *App) CreateWithArgs(args *EventArgs) (*cloudevents.Event, error) { + spec := &event.Spec{ + Type: args.Type, + ID: args.ID, + Source: args.Source, + Fields: make([]event.FieldSpec, 0, len(args.Fields)+len(args.RawFields)), + } + for _, fieldAssigment := range args.Fields { + split := strings.SplitN(fieldAssigment, "=", fieldAssigmentSize) + path, value := split[0], split[1] + var floatVal float64 + if boolVal, err := readAsBoolean(value); err == nil { + spec.AddField(path, boolVal) + } else if floatVal, err = readAsFloat64(value); err == nil { + spec.AddField(path, floatVal) + } else { + spec.AddField(path, value) + } + } + for _, fieldAssigment := range args.RawFields { + split := strings.SplitN(fieldAssigment, "=", fieldAssigmentSize) + path, value := split[0], split[1] + spec.AddField(path, value) + } + ce, err := event.CreateFromSpec(spec) + if err != nil { + return nil, fmt.Errorf("%w: %v", ErrCantBuildEvent, err) + } + return ce, nil +} + +// PresentWith will present an event with specified output. +func (c *App) PresentWith(e *cloudevents.Event, output OutputMode) (string, error) { + switch output { + case HumanReadable: + return presentEventAsHumanReadable(e) + case JSON: + return presentEventAsJSON(e) + case YAML: + return presentEventAsYaml(e) + } + return "", fmt.Errorf("%w: %v", ErrUnsupportedOutputMode, output) +} + +func presentEventAsYaml(in *cloudevents.Event) (string, error) { + bytes, err := yaml.Marshal(in) + if err != nil { + return "", fmt.Errorf("%w: %v", ErrCantMarshalEvent, err) + } + return string(bytes), nil +} + +func presentEventAsJSON(event *cloudevents.Event) (string, error) { + bytes, err := json.MarshalIndent(event, "", " ") + if err != nil { + return "", fmt.Errorf("%w: %v", ErrCantMarshalEvent, err) + } + return string(bytes), nil +} + +func presentEventAsHumanReadable(e *cloudevents.Event) (string, error) { + formattedTime := e.Time(). + In(time.UTC). 
+ Format(time.RFC3339Nano) + m := map[string]interface{}{} + err := json.Unmarshal(e.Data(), &m) + if err != nil { + return "", fmt.Errorf("%w: %v", ErrCantMarshalEvent, err) + } + data, err := json.MarshalIndent(m, " ", " ") + if err != nil { + return "", fmt.Errorf("%w: %v", ErrCantMarshalEvent, err) + } + return fmt.Sprintf( + `☁️ cloudevents.Event +Validation: valid +Context Attributes, + specversion: %s + type: %s + source: %s + id: %s + time: %s + datacontenttype: %s +Data, + %s`, + e.SpecVersion(), + e.Type(), + e.Source(), + e.ID(), + formattedTime, + e.DataContentType(), + string(data), + ), nil +} + +func readAsBoolean(in string) (bool, error) { + val, err := strconv.ParseBool(in) + // TODO(cardil): log error as it may be beneficial for debugging + if err != nil { + return false, fmt.Errorf("%w: %v", ErrInvalidFormat, err) + } + if text := fmt.Sprintf("%t", val); in == text { + return val, nil + } + return false, fmt.Errorf("%w: not a bool: %v", ErrInvalidFormat, in) +} + +func readAsFloat64(in string) (float64, error) { + if intVal, err := readAsInt64(in); err == nil { + return float64(intVal), nil + } + val, err := strconv.ParseFloat(in, precision64BitSize) + // TODO(cardil): log error as it may be beneficial for debugging + if err != nil { + return -0, fmt.Errorf("%w: %v", ErrInvalidFormat, err) + } + if text := fmt.Sprintf("%f", val); in == text { + return val, nil + } + return -0, fmt.Errorf("%w: not a float: %v", ErrInvalidFormat, in) +} + +func readAsInt64(in string) (int64, error) { + val, err := strconv.ParseInt(in, decimalBase, precision64BitSize) + // TODO(cardil): log error as it may be beneficial for debugging + if err != nil { + return -0, fmt.Errorf("%w: %v", ErrInvalidFormat, err) + } + if text := fmt.Sprintf("%d", val); in == text { + return val, nil + } + return -0, fmt.Errorf("%w: not an int: %v", ErrInvalidFormat, in) +} diff --git a/vendor/knative.dev/kn-plugin-event/pkg/cli/ics/encoding.go b/vendor/knative.dev/kn-plugin-event/pkg/cli/ics/encoding.go new file mode 100644 index 0000000000..9cc72ded51 --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/pkg/cli/ics/encoding.go @@ -0,0 +1,58 @@ +package ics + +import ( + "bytes" + "compress/zlib" + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + cloudevents "github.com/cloudevents/sdk-go/v2" +) + +// Encode will encode a cloud event to ICS encoding form: +// Base64(zlib(minimal JSON)). +func Encode(ce cloudevents.Event) (string, error) { + bb, err := json.Marshal(ce) + if err != nil { + return "", fmt.Errorf("%w: %v", ErrCouldntEncode, err) + } + var b bytes.Buffer + encoder := base64.NewEncoder(base64.RawURLEncoding, &b) + w := zlib.NewWriter(encoder) + _, err = w.Write(bb) + if err != nil { + return "", fmt.Errorf("%w: %v", ErrCouldntEncode, err) + } + err = w.Close() + if err != nil { + return "", fmt.Errorf("%w: %v", ErrCouldntEncode, err) + } + err = encoder.Close() + if err != nil { + return "", fmt.Errorf("%w: %v", ErrCouldntEncode, err) + } + return b.String(), nil +} + +// Decode will decode an event from ICS encoding. 
+func Decode(encoded string) (*cloudevents.Event, error) { + r := strings.NewReader(encoded) + decoder := base64.NewDecoder(base64.RawURLEncoding, r) + reader, err := zlib.NewReader(decoder) + if err != nil { + return nil, fmt.Errorf("%w: %v", ErrCouldntDecode, err) + } + bb, err := ioutil.ReadAll(reader) + if err != nil { + return nil, fmt.Errorf("%w: %v", ErrCouldntDecode, err) + } + ce := &cloudevents.Event{} + err = json.Unmarshal(bb, ce) + if err != nil { + return nil, fmt.Errorf("%w: %v", ErrCouldntDecode, err) + } + return ce, nil +} diff --git a/vendor/knative.dev/kn-plugin-event/pkg/cli/ics/send.go b/vendor/knative.dev/kn-plugin-event/pkg/cli/ics/send.go new file mode 100644 index 0000000000..e7a6969b7b --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/pkg/cli/ics/send.go @@ -0,0 +1,59 @@ +package ics + +import ( + "fmt" + "net/url" + + cloudevents "github.com/cloudevents/sdk-go/v2" + "github.com/kelseyhightower/envconfig" + "knative.dev/kn-plugin-event/pkg/event" +) + +// SendFromEnv will send an event based on a values stored in environmental +// variables. +func (app *App) SendFromEnv() error { + c, err := app.configure() + if err != nil { + return err + } + err = c.sender.Send(*c.ce) + if err != nil { + return fmt.Errorf("%w: %v", ErrCantSendWithICS, err) + } + return nil +} + +func (app *App) configure() (config, error) { + args := &Args{ + Sink: "localhost", + } + err := envconfig.Process("K", args) + if err != nil { + return config{}, fmt.Errorf("%w: %v", ErrCantConfigureICS, err) + } + u, err := url.Parse(args.Sink) + if err != nil { + return config{}, fmt.Errorf("%w: %v", ErrCantConfigureICS, err) + } + target := &event.Target{ + Type: event.TargetTypeReachable, + URLVal: u, + } + s, err := app.Binding.CreateSender(target) + if err != nil { + return config{}, fmt.Errorf("%w: %v", ErrCantConfigureICS, err) + } + ce, err := Decode(args.Event) + if err != nil { + return config{}, err + } + return config{ + sender: s, + ce: ce, + }, nil +} + +type config struct { + sender event.Sender + ce *cloudevents.Event +} diff --git a/vendor/knative.dev/kn-plugin-event/pkg/cli/ics/types.go b/vendor/knative.dev/kn-plugin-event/pkg/cli/ics/types.go new file mode 100644 index 0000000000..c2c0a7cc5d --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/pkg/cli/ics/types.go @@ -0,0 +1,33 @@ +package ics + +import ( + "errors" + + "knative.dev/kn-plugin-event/pkg/event" +) + +var ( + // ErrCouldntEncode is returned when problem occur while trying to encode an + // event. + ErrCouldntEncode = errors.New("couldn't encode an event") + // ErrCouldntDecode is returned when problem occur while trying to decode an + // event. + ErrCouldntDecode = errors.New("couldn't decode an event") + // ErrCantConfigureICS is returned when problem occur while trying to + // configure ICS sender. + ErrCantConfigureICS = errors.New("can't configure ICS sender") + // ErrCantSendWithICS if can't send with ICS sender. + ErrCantSendWithICS = errors.New("can't send with ICS sender") +) + +// Args holds a list of args for in-cluster-sender. +type Args struct { + Sink string + CeOverrides string + Event string +} + +// App holds an ICS app binding. 
+type App struct { + event.Binding +} diff --git a/vendor/knative.dev/kn-plugin-event/pkg/cli/options.go b/vendor/knative.dev/kn-plugin-event/pkg/cli/options.go new file mode 100644 index 0000000000..513ce7238a --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/pkg/cli/options.go @@ -0,0 +1,114 @@ +package cli + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + "github.com/ghodss/yaml" + "go.uber.org/zap" + "go.uber.org/zap/buffer" + "go.uber.org/zap/zapcore" + "knative.dev/kn-plugin-event/pkg/event" +) + +// WithLogger will create an event suitable Options from CLI ones. +func (opts *Options) WithLogger() (*event.Properties, error) { + zc := zap.NewProductionConfig() + cfg := zap.NewProductionEncoderConfig() + if opts.Verbose { + cfg = zap.NewDevelopmentEncoderConfig() + } + cfg.EncodeTime = zapcore.RFC3339NanoTimeEncoder + var encoder zapcore.Encoder + switch opts.Output { + case HumanReadable: + if !opts.Verbose { + cfg.CallerKey = "" + } + cfg.ConsoleSeparator = " " + cfg.EncodeLevel = alignCapitalColorLevelEncoder + cfg.EncodeTime = zapcore.TimeEncoderOfLayout(time.StampMilli) + encoder = zapcore.NewConsoleEncoder(cfg) + case YAML: + encoder = &yamlEncoder{zapcore.NewJSONEncoder(cfg)} + case JSON: + encoder = zapcore.NewJSONEncoder(cfg) + } + sink := zapcore.AddSync(opts.OutWriter) + errSink := zapcore.AddSync(opts.ErrWriter) + zcore := zapcore.NewCore(encoder, sink, zc.Level) + log := zap.New( + zcore, buildOptions(zc, errSink)..., + ) + + return &event.Properties{ + KnPluginOptions: opts.KnPluginOptions, + Log: log.Sugar(), + }, nil +} + +func alignCapitalColorLevelEncoder(l zapcore.Level, enc zapcore.PrimitiveArrayEncoder) { + spaces := len(zapcore.FatalLevel.CapitalString()) - len(l.CapitalString()) + if spaces > 0 { + enc.AppendString(strings.Repeat(" ", spaces)) + } + zapcore.CapitalColorLevelEncoder(l, enc) +} + +func buildOptions(cfg zap.Config, errSink zapcore.WriteSyncer) []zap.Option { + opts := []zap.Option{zap.ErrorOutput(errSink)} + + if cfg.Development { + opts = append(opts, zap.Development()) + } + + if !cfg.DisableCaller { + opts = append(opts, zap.AddCaller()) + } + + stackLevel := zap.ErrorLevel + if cfg.Development { + stackLevel = zap.WarnLevel + } + if !cfg.DisableStacktrace { + opts = append(opts, zap.AddStacktrace(stackLevel)) + } + + return opts +} + +type yamlEncoder struct { + zapcore.Encoder +} + +func (y *yamlEncoder) EncodeEntry(entry zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) { + buf, err := y.Encoder.EncodeEntry(entry, fields) + if err != nil { + return nil, unexpected(err) + } + var v interface{} + err = json.Unmarshal(buf.Bytes(), &v) + if err != nil { + return nil, unexpected(err) + } + bytes, err := yaml.Marshal(v) + if err != nil { + return nil, unexpected(err) + } + buf = buffer.NewPool().Get() + _, _ = buf.Write([]byte("---\n")) + if _, err = buf.Write(bytes); err != nil { + return nil, unexpected(err) + } + return buf, nil +} + +func unexpected(err error) error { + if errors.Is(err, event.ErrUnexpected) { + return err + } + return fmt.Errorf("%w: %v", event.ErrUnexpected, err) +} diff --git a/vendor/knative.dev/kn-plugin-event/pkg/cli/retcode/retcode.go b/vendor/knative.dev/kn-plugin-event/pkg/cli/retcode/retcode.go new file mode 100644 index 0000000000..c3507cbe3a --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/pkg/cli/retcode/retcode.go @@ -0,0 +1,11 @@ +package retcode + +import "hash/crc32" + +// Calc will calculate an POSIX retcode from an error. 
+func Calc(err error) int {
+	if err == nil {
+		return 0
+	}
+	return int(crc32.ChecksumIEEE([]byte(err.Error())))%254 + 1
+}
diff --git a/vendor/knative.dev/kn-plugin-event/pkg/cli/send.go b/vendor/knative.dev/kn-plugin-event/pkg/cli/send.go
new file mode 100644
index 0000000000..fa48253b5f
--- /dev/null
+++ b/vendor/knative.dev/kn-plugin-event/pkg/cli/send.go
@@ -0,0 +1,37 @@
+package cli
+
+import (
+	"errors"
+	"fmt"
+
+	cloudevents "github.com/cloudevents/sdk-go/v2"
+	"knative.dev/kn-plugin-event/pkg/event"
+)
+
+// Send will send a CloudEvent to the target.
+func (c *App) Send(ce cloudevents.Event, target TargetArgs, options *Options) error {
+	props, err := options.WithLogger()
+	if err != nil {
+		return err
+	}
+	t, err := c.createTarget(target, props)
+	if err != nil {
+		return err
+	}
+	s, err := c.Binding.NewSender(t)
+	if err != nil {
+		return cantSentEvent(err)
+	}
+	err = s.Send(ce)
+	if err == nil {
+		return nil
+	}
+	return cantSentEvent(err)
+}
+
+func cantSentEvent(err error) error {
+	if errors.Is(err, event.ErrCantSentEvent) {
+		return err
+	}
+	return fmt.Errorf("%w: %v", event.ErrCantSentEvent, err)
+}
diff --git a/vendor/knative.dev/kn-plugin-event/pkg/cli/target.go b/vendor/knative.dev/kn-plugin-event/pkg/cli/target.go
new file mode 100644
index 0000000000..b9f96f57b9
--- /dev/null
+++ b/vendor/knative.dev/kn-plugin-event/pkg/cli/target.go
@@ -0,0 +1,114 @@
+package cli
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"regexp"
+
+	clientutil "knative.dev/client/pkg/util"
+	"knative.dev/kn-plugin-event/pkg/event"
+	"knative.dev/pkg/apis"
+)
+
+var (
+	// ErrCantUseBothToURLAndToFlags will be raised if the user uses both the
+	// --to and --to-url flags.
+	ErrCantUseBothToURLAndToFlags = errors.New("can't use both --to and --to-url flags")
+	// ErrUseToURLOrToFlagIsRequired will be raised if the user used neither the
+	// --to nor the --to-url flag.
+	ErrUseToURLOrToFlagIsRequired = errors.New("use --to or --to-url flag is required")
+	// ErrInvalidURLFormat will be raised if the given URL is invalid.
+	ErrInvalidURLFormat = errors.New("invalid URL format")
+	// ErrInvalidToFormat will be raised if the given addressable doesn't have the
+	// expected format.
+	ErrInvalidToFormat = errors.New("--to flag needs to be in format " +
+		"kind:apiVersion:name for named resources or " +
+		"kind:apiVersion:labelKey1=value1,labelKey2=value2 for matching via " +
+		"a label selector")
+)
+
+// ValidateTarget validates the target arguments of the send command.
+func ValidateTarget(args *TargetArgs) error { + if args.URL == "" && args.Addressable == "" { + return ErrUseToURLOrToFlagIsRequired + } + if args.URL != "" && args.Addressable != "" { + return ErrCantUseBothToURLAndToFlags + } + if args.URL != "" { + _, err := url.ParseRequestURI(args.URL) + if err != nil { + return fmt.Errorf("--to-url %w: %s", ErrInvalidURLFormat, err.Error()) + } + } + if args.Addressable != "" { + // ref: https://regex101.com/r/TcxsLO/3 + r := regexp.MustCompile("([a-zA-Z0-9]+):([a-zA-Z0-9/.]+):([a-zA-Z0-9=,_-]+)") + if !r.MatchString(args.Addressable) { + return ErrInvalidToFormat + } + } + return validateAddressableURI(args.AddressableURI) +} + +func validateAddressableURI(uri string) error { + if len(uri) > 0 { + _, err := url.ParseRequestURI(uri) + if err != nil { + return fmt.Errorf("--addressable-uri %w: %s", ErrInvalidURLFormat, err.Error()) + } + } + return nil +} + +func (c *App) createTarget(args TargetArgs, props *event.Properties) (*event.Target, error) { + if args.Addressable != "" { + args, err := c.fillInDefaultNamespace(args, props) + if err != nil { + return nil, err + } + ref, err := clientutil.ToTrackerReference(args.Addressable, args.Namespace) + if err != nil { + return nil, fmt.Errorf("%w: %s", ErrInvalidToFormat, err.Error()) + } + uri := &apis.URL{Path: args.AddressableURI} + return &event.Target{ + Type: event.TargetTypeAddressable, + AddressableVal: &event.AddressableSpec{ + Reference: ref, + URI: uri, + SenderNamespace: args.SenderNamespace, + }, + Properties: props, + }, nil + } + if args.URL != "" { + u, err := url.Parse(args.URL) + if err != nil { + return nil, fmt.Errorf("--to-url %w: %s", ErrInvalidURLFormat, err.Error()) + } + return &event.Target{ + Type: event.TargetTypeReachable, + URLVal: u, + Properties: props, + }, nil + } + return nil, ErrUseToURLOrToFlagIsRequired +} + +func (c *App) fillInDefaultNamespace(args TargetArgs, props *event.Properties) (TargetArgs, error) { + if len(args.Namespace) == 0 || len(args.SenderNamespace) == 0 { + defaultNs, err := c.DefaultNamespace(props) + if err != nil { + return TargetArgs{}, cantSentEvent(err) + } + if len(args.Namespace) == 0 { + args.Namespace = defaultNs + } + if len(args.SenderNamespace) == 0 { + args.SenderNamespace = defaultNs + } + } + return args, nil +} diff --git a/vendor/knative.dev/kn-plugin-event/pkg/cli/types.go b/vendor/knative.dev/kn-plugin-event/pkg/cli/types.go new file mode 100644 index 0000000000..e313a1b855 --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/pkg/cli/types.go @@ -0,0 +1,56 @@ +package cli + +import ( + "io" + + "github.com/thediveo/enumflag" + "knative.dev/kn-plugin-event/pkg/event" +) + +// Options holds a general args for all commands. +type Options struct { + event.KnPluginOptions + + // Output define type of output commands should be producing. + Output OutputMode + + // Verbose tells does commands should display additional information about + // what's happening? Verbose information is printed on stderr. + Verbose bool + + OutWriter io.Writer + ErrWriter io.Writer +} + +// EventArgs holds args of event to be created with. +type EventArgs struct { + Type string + ID string + Source string + Fields []string + RawFields []string +} + +// TargetArgs holds args specific for even sending. +type TargetArgs struct { + URL string + Addressable string + Namespace string + SenderNamespace string + AddressableURI string +} + +// OutputMode is type of output to produce. +type OutputMode enumflag.Flag + +// OutputMode enumeration values. 
+const ( + HumanReadable OutputMode = iota + JSON + YAML +) + +// App object. +type App struct { + event.Binding +} diff --git a/vendor/knative.dev/kn-plugin-event/pkg/cli/version.go b/vendor/knative.dev/kn-plugin-event/pkg/cli/version.go new file mode 100644 index 0000000000..7e9e114e99 --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/pkg/cli/version.go @@ -0,0 +1,9 @@ +package cli + +// PluginVersionOutput is a struct that is used to output project version in +// machine readable format. +type PluginVersionOutput struct { + Name string `json:"name" yaml:"name"` + Version string `json:"version" yaml:"version"` + Image string `json:"image" yaml:"image"` +} diff --git a/vendor/knative.dev/kn-plugin-event/pkg/configuration/cli.go b/vendor/knative.dev/kn-plugin-event/pkg/configuration/cli.go new file mode 100644 index 0000000000..a097374dfb --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/pkg/configuration/cli.go @@ -0,0 +1,11 @@ +package configuration + +import ( + "knative.dev/kn-plugin-event/pkg/cli" +) + +// CreateCli creates the configured cli.App to work with. +func CreateCli() *cli.App { + binding := senderBinding() + return &cli.App{Binding: eventsBinding(binding)} +} diff --git a/vendor/knative.dev/kn-plugin-event/pkg/configuration/defaults.go b/vendor/knative.dev/kn-plugin-event/pkg/configuration/defaults.go new file mode 100644 index 0000000000..9d947ae583 --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/pkg/configuration/defaults.go @@ -0,0 +1,22 @@ +package configuration + +import ( + "knative.dev/kn-plugin-event/pkg/event" + "knative.dev/kn-plugin-event/pkg/k8s" + "knative.dev/kn-plugin-event/pkg/sender" +) + +func senderBinding() sender.Binding { + return sender.Binding{ + CreateKubeClients: memoizeKubeClients(k8s.CreateKubeClient), + CreateJobRunner: k8s.CreateJobRunner, + CreateAddressResolver: k8s.CreateAddressResolver, + } +} + +func eventsBinding(binding sender.Binding) event.Binding { + return event.Binding{ + CreateSender: binding.New, + DefaultNamespace: binding.DefaultNamespace, + } +} diff --git a/vendor/knative.dev/kn-plugin-event/pkg/configuration/ics.go b/vendor/knative.dev/kn-plugin-event/pkg/configuration/ics.go new file mode 100644 index 0000000000..f1b6aa02c7 --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/pkg/configuration/ics.go @@ -0,0 +1,11 @@ +package configuration + +import ( + "knative.dev/kn-plugin-event/pkg/cli/ics" +) + +// CreateIcs creates the configured ics.App to work with. 
+func CreateIcs() *ics.App { + binding := senderBinding() + return &ics.App{Binding: eventsBinding(binding)} +} diff --git a/vendor/knative.dev/kn-plugin-event/pkg/configuration/memoized.go b/vendor/knative.dev/kn-plugin-event/pkg/configuration/memoized.go new file mode 100644 index 0000000000..4aa24dd8fb --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/pkg/configuration/memoized.go @@ -0,0 +1,29 @@ +package configuration + +import ( + "knative.dev/kn-plugin-event/pkg/event" + "knative.dev/kn-plugin-event/pkg/k8s" + "knative.dev/kn-plugin-event/pkg/sender" +) + +func memoizeKubeClients(delegate sender.CreateKubeClients) sender.CreateKubeClients { + mem := kubeClientsMemoizer{delegate: delegate} + return mem.computeClients +} + +type kubeClientsMemoizer struct { + delegate sender.CreateKubeClients + result k8s.Clients +} + +func (m *kubeClientsMemoizer) computeClients(props *event.Properties) (k8s.Clients, error) { + if m.result != nil { + return m.result, nil + } + cl, err := m.delegate(props) + if err != nil { + return nil, err + } + m.result = cl + return m.result, nil +} diff --git a/vendor/knative.dev/kn-plugin-event/pkg/event/constants.go b/vendor/knative.dev/kn-plugin-event/pkg/event/constants.go new file mode 100644 index 0000000000..48b76fd32c --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/pkg/event/constants.go @@ -0,0 +1,27 @@ +package event + +import ( + "errors" + "fmt" + + "github.com/google/uuid" + "knative.dev/kn-plugin-event/pkg/metadata" +) + +const ( + // DefaultType holds a default type for a event. + DefaultType = "dev.knative.cli.plugin.event.generic" +) + +// ErrUnexpected if unexpected error found. +var ErrUnexpected = errors.New("unexpected") + +// DefaultSource holds a default source of an event. +func DefaultSource() string { + return fmt.Sprintf("%s/%s", metadata.PluginName, metadata.Version) +} + +// NewID creates a new ID for an event. +func NewID() string { + return uuid.New().String() +} diff --git a/vendor/knative.dev/kn-plugin-event/pkg/event/create.go b/vendor/knative.dev/kn-plugin-event/pkg/event/create.go new file mode 100644 index 0000000000..c7d8766a9f --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/pkg/event/create.go @@ -0,0 +1,81 @@ +package event + +import ( + "errors" + "fmt" + "strings" + "time" + + cloudevents "github.com/cloudevents/sdk-go/v2" + "github.com/wavesoftware/go-ensure" +) + +var ( + // ErrCantMarshalAsJSON is returned if given CE data can't be marshalled + // as JSON. + ErrCantMarshalAsJSON = errors.New("can't marshal as JSON") + + // ErrCantSetField is returned if given field can't be applied. + ErrCantSetField = errors.New("can't set field") +) + +// NewDefault creates a default CloudEvent. +func NewDefault() *cloudevents.Event { + e := cloudevents.NewEvent() + e.SetType(DefaultType) + e.SetID(NewID()) + ensure.NoError(e.SetData(cloudevents.ApplicationJSON, map[string]string{})) + e.SetSource(DefaultSource()) + e.SetTime(time.Now()) + ensure.NoError(e.Validate()) + return &e +} + +// CreateFromSpec will create an event by parsing given args. 
+func CreateFromSpec(spec *Spec) (*cloudevents.Event, error) { + e := NewDefault() + e.SetID(spec.ID) + e.SetSource(spec.Source) + e.SetType(spec.Type) + m := map[string]interface{}{} + for _, fieldSpec := range spec.Fields { + if err := updateMapWithSpec(m, fieldSpec); err != nil { + return nil, err + } + } + err := e.SetData(cloudevents.ApplicationJSON, m) + if err != nil { + return nil, fmt.Errorf("%w: %v", ErrCantMarshalAsJSON, err) + } + return e, nil +} + +func updateMapWithSpec(m map[string]interface{}, spec FieldSpec) error { + sep := "." + paths := strings.Split(spec.Path, sep) + curr := m + for i, p := range paths { + if i < len(paths)-1 { + if _, ok := curr[p]; !ok { + curr[p] = map[string]interface{}{} + } + candidate := curr[p] + var ok bool + curr, ok = candidate.(map[string]interface{}) + if !ok { + return fmt.Errorf("%w: %#v path in conflict with value %#v", + ErrCantSetField, spec.Path, candidate) + } + } else { + curr[p] = spec.Value + } + } + return nil +} + +// AddField will add a field to the spec. +func (s *Spec) AddField(path string, val interface{}) { + s.Fields = append(s.Fields, FieldSpec{ + Path: path, Value: val, + }) +} diff --git a/vendor/knative.dev/kn-plugin-event/pkg/event/sender.go b/vendor/knative.dev/kn-plugin-event/pkg/event/sender.go new file mode 100644 index 0000000000..087b8f5524 --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/pkg/event/sender.go @@ -0,0 +1,41 @@ +package event + +import ( + "errors" + "fmt" + + cloudevents "github.com/cloudevents/sdk-go/v2" +) + +// ErrCantSentEvent if event can't be sent. +var ErrCantSentEvent = errors.New("can't sent event") + +// NewSender will create a sender that can send event to cluster. +func (b Binding) NewSender(target *Target) (Sender, error) { + sender, err := b.CreateSender(target) + if err != nil { + return nil, err + } + return &sendLogic{Sender: sender, Properties: target.Properties}, nil +} + +type sendLogic struct { + Sender + *Properties +} + +func (l *sendLogic) Send(ce cloudevents.Event) error { + err := l.Sender.Send(ce) + if err == nil { + l.Log.Infof("Event (ID: %s) have been sent.", ce.ID()) + return nil + } + return cantSentEvent(err) +} + +func cantSentEvent(err error) error { + if errors.Is(err, ErrCantSentEvent) { + return err + } + return fmt.Errorf("%w: %v", ErrCantSentEvent, err) +} diff --git a/vendor/knative.dev/kn-plugin-event/pkg/event/types.go b/vendor/knative.dev/kn-plugin-event/pkg/event/types.go new file mode 100644 index 0000000000..052d402aed --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/pkg/event/types.go @@ -0,0 +1,92 @@ +package event + +import ( + "net/url" + + cloudevents "github.com/cloudevents/sdk-go/v2" + "go.uber.org/zap" + "knative.dev/pkg/apis" + "knative.dev/pkg/tracker" +) + +// Spec holds specification of event to be created. +type Spec struct { + Type string + ID string + Source string + Fields []FieldSpec +} + +// FieldSpec holds a specification of a event's data field. +type FieldSpec struct { + Path string + Value interface{} +} + +// TargetType specify a type of a event target. +type TargetType int + +const ( + // TargetTypeReachable specify a type of event target that is network + // reachable, and direct HTTP communication can be performed. + TargetTypeReachable TargetType = iota + + // TargetTypeAddressable represent a type of event target that is cluster + // private, and direct communication can't be performed. In this case in + // cluster sender Job will be created to send the event. 
+ TargetTypeAddressable +) + +// AddressableSpec specify destination of a event to be sent, as well as sender +// namespace that should be used to create a sender Job in. +type AddressableSpec struct { + *tracker.Reference + URI *apis.URL + SenderNamespace string +} + +// Target is a target to send event to. +type Target struct { + Type TargetType + URLVal *url.URL + AddressableVal *AddressableSpec + *Properties +} + +// KubeconfigOptions holds options for Kubernetes Client. +type KubeconfigOptions struct { + Path string + Context string + Cluster string +} + +// KnPluginOptions holds options inherited to every Kn plugin. +type KnPluginOptions struct { + KubeconfigOptions +} + +// Properties holds a general properties. +type Properties struct { + KnPluginOptions + Log *zap.SugaredLogger +} + +// Sender will send event to specified target. +type Sender interface { + // Send will send cloudevents.Event to configured target, or return an error + // if one occur. + Send(ce cloudevents.Event) error +} + +// CreateSender creates a Sender. +type CreateSender func(target *Target) (Sender, error) + +// DefaultNamespace returns a default namespace for connected K8s cluster or +// error is namespace can't be determined. +type DefaultNamespace func(props *Properties) (string, error) + +// Binding holds injectable dependencies. +type Binding struct { + CreateSender + DefaultNamespace +} diff --git a/vendor/knative.dev/kn-plugin-event/pkg/k8s/addressresolver.go b/vendor/knative.dev/kn-plugin-event/pkg/k8s/addressresolver.go new file mode 100644 index 0000000000..92f9e69f72 --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/pkg/k8s/addressresolver.go @@ -0,0 +1,119 @@ +package k8s + +import ( + "context" + "fmt" + "net/url" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/client/injection/ducks/duck/v1/addressable" + "knative.dev/pkg/controller" + "knative.dev/pkg/injection/clients/dynamicclient" + "knative.dev/pkg/kmeta" + "knative.dev/pkg/resolver" + "knative.dev/pkg/tracker" +) + +// ReferenceAddressResolver will resolve the tracker.Reference to an url.URL, or +// return an error. +type ReferenceAddressResolver interface { + ResolveAddress(ref *tracker.Reference, uri *apis.URL) (*url.URL, error) +} + +// CreateAddressResolver will create ReferenceAddressResolver, or return an +// error. +func CreateAddressResolver(kube Clients) ReferenceAddressResolver { + ctx := ctxWithDynamic(kube) + return &addressResolver{ + kube: kube, ctx: addressable.WithDuck(ctx), + } +} + +type addressResolver struct { + kube Clients + ctx context.Context +} + +// ResolveAddress of a tracker.Reference with given uri (as apis.URL). 
+func (a *addressResolver) ResolveAddress( + ref *tracker.Reference, + uri *apis.URL, +) (*url.URL, error) { + gvr := a.toGVR(ref) + dest, err := a.toDestination(gvr, ref, uri) + if err != nil { + return nil, err + } + parent := toAccessor(ref) + tr := tracker.New(noopCallback, controller.GetTrackerLease(a.ctx)) + r := resolver.NewURIResolverFromTracker(a.ctx, tr) + u, err := r.URIFromDestinationV1(a.ctx, *dest, parent) + if err != nil { + return nil, fmt.Errorf("%w: %v", ErrNotAddressable, err) + } + resolved := u.URL() + return resolved, nil +} + +func (a *addressResolver) toDestination( + gvr schema.GroupVersionResource, + ref *tracker.Reference, + uri *apis.URL, +) (*duckv1.Destination, error) { + dest := &duckv1.Destination{ + Ref: &duckv1.KReference{ + Kind: ref.Kind, + Namespace: ref.Namespace, + Name: ref.Name, + APIVersion: ref.APIVersion, + }, + URI: uri, + } + if ref.Selector != nil { + list, err := a.kube.Dynamic().Resource(gvr). + Namespace(ref.Namespace).List(a.ctx, metav1.ListOptions{ + LabelSelector: ref.Selector.String(), + }) + if err != nil { + return nil, fmt.Errorf("%w: %v", ErrNotFound, err) + } + count := len(list.Items) + if count == 0 { + return nil, ErrNotFound + } + if count > 1 { + return nil, fmt.Errorf("%w: %d", ErrMoreThenOneFound, count) + } + dest.Ref.Name = list.Items[0].GetName() + } + return dest, nil +} + +func (a *addressResolver) toGVR(ref *tracker.Reference) schema.GroupVersionResource { + gvk := ref.GroupVersionKind() + gvr := apis.KindToResource(gvk) + return gvr +} + +func toAccessor(ref *tracker.Reference) kmeta.Accessor { + return &unstructured.Unstructured{Object: map[string]interface{}{ + "apiVersion": ref.APIVersion, + "kind": ref.Kind, + "metadata": map[string]interface{}{ + "name": ref.Name, + "namespace": ref.Namespace, + }, + }} +} + +func ctxWithDynamic(kube Clients) context.Context { + return context.WithValue(kube.Context(), dynamicclient.Key{}, kube.Dynamic()) +} + +func noopCallback(_ types.NamespacedName) { +} diff --git a/vendor/knative.dev/kn-plugin-event/pkg/k8s/errors.go b/vendor/knative.dev/kn-plugin-event/pkg/k8s/errors.go new file mode 100644 index 0000000000..1ca84639cf --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/pkg/k8s/errors.go @@ -0,0 +1,23 @@ +package k8s + +import "errors" + +var ( + // ErrInvalidReference if given reference is invalid. + ErrInvalidReference = errors.New("reference is invalid") + + // ErrNotFound if given reference do not point to any resource. + ErrNotFound = errors.New("resource not found") + + // ErrNotAddressable if found resource isn't addressable. + ErrNotAddressable = errors.New("resource isn't addressable") + + // ErrMoreThenOneFound if more then one resource has been found. + ErrMoreThenOneFound = errors.New("more then one resource has been found") + + // ErrUnexcpected if something unexpected actually has happened. + ErrUnexcpected = errors.New("something unexpected actually has happened") + + // ErrICSenderJobFailed if the ICS job runner has failed. 
+ ErrICSenderJobFailed = errors.New("the ICS job runner has failed") +) diff --git a/vendor/knative.dev/kn-plugin-event/pkg/k8s/jobrunner.go b/vendor/knative.dev/kn-plugin-event/pkg/k8s/jobrunner.go new file mode 100644 index 0000000000..668b9155d7 --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/pkg/k8s/jobrunner.go @@ -0,0 +1,143 @@ +package k8s + +import ( + "fmt" + "sync" + + batchv1 "k8s.io/api/batch/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" +) + +// JobRunner will launch a Job and monitor it for completion. +type JobRunner interface { + Run(*batchv1.Job) error +} + +// CreateJobRunner will create a JobRunner, or return an error. +func CreateJobRunner(kube Clients) JobRunner { + return &jobRunner{ + kube: kube, + } +} + +type jobRunner struct { + kube Clients +} + +type task struct { + errs chan<- error + ready chan<- bool + wg *sync.WaitGroup +} + +func (j *jobRunner) Run(job *batchv1.Job) error { + ready := make(chan bool) + errs := make(chan error) + tsk := task{ + errs, ready, &sync.WaitGroup{}, + } + tasks := []func(*batchv1.Job, task){ + // wait is started first, making sure to capture success, even the ultra-fast one. + j.waitForSuccess, + j.createJob, + } + tsk.wg.Add(len(tasks)) + // run all tasks in parallel + for _, fn := range tasks { + go fn(job, tsk) + <-ready + } + go waitAndClose(tsk) + // return the first error + for err := range errs { + if err != nil { + return err + } + } + + return j.deleteJob(job) +} + +func (j *jobRunner) createJob(job *batchv1.Job, tsk task) { + defer tsk.wg.Done() + tsk.ready <- true + ctx := j.kube.Context() + jobs := j.kube.Typed().BatchV1().Jobs(job.Namespace) + _, err := jobs.Create(ctx, job, metav1.CreateOptions{}) + if err != nil { + tsk.errs <- fmt.Errorf("%w: %v", ErrICSenderJobFailed, err) + } +} + +func (j *jobRunner) waitForSuccess(job *batchv1.Job, tsk task) { + defer tsk.wg.Done() + err := j.watchJob(job, tsk, func(job *batchv1.Job) (bool, error) { + if job.Status.CompletionTime == nil && job.Status.Failed == 0 { + return false, nil + } + // We should be done if we reach here. 
+ if job.Status.Succeeded < 1 { + return false, fmt.Errorf("%w: %s", ErrICSenderJobFailed, + "expected to have successful job") + } + return true, nil + }) + if err != nil { + tsk.errs <- fmt.Errorf("%w: %v", ErrICSenderJobFailed, err) + } +} + +func waitAndClose(tsk task) { + tsk.wg.Wait() + close(tsk.errs) +} + +func (j *jobRunner) deleteJob(job *batchv1.Job) error { + ctx := j.kube.Context() + jobs := j.kube.Typed().BatchV1().Jobs(job.GetNamespace()) + err := jobs.Delete(ctx, job.GetName(), metav1.DeleteOptions{}) + if err != nil { + return fmt.Errorf("%w: %v", ErrICSenderJobFailed, err) + } + pods := j.kube.Typed().CoreV1().Pods(job.GetNamespace()) + err = pods.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{ + LabelSelector: fmt.Sprintf("job-name=%s", job.GetName()), + }) + if err != nil { + return fmt.Errorf("%w: %v", ErrICSenderJobFailed, err) + } + return nil +} + +func (j *jobRunner) watchJob(obj metav1.Object, tsk task, changeFn func(job *batchv1.Job) (bool, error)) error { + ctx := j.kube.Context() + jobs := j.kube.Typed().BatchV1().Jobs(obj.GetNamespace()) + watcher, err := jobs.Watch(ctx, metav1.ListOptions{ + FieldSelector: fmt.Sprintf("metadata.name=%s", obj.GetName()), + }) + if err != nil { + return fmt.Errorf("%w: %v", ErrICSenderJobFailed, err) + } + defer watcher.Stop() + resultCh := watcher.ResultChan() + tsk.ready <- true + for result := range resultCh { + if result.Type == watch.Added || result.Type == watch.Modified { + job, ok := result.Object.(*batchv1.Job) + if !ok { + return fmt.Errorf("%w: %s: %T", ErrICSenderJobFailed, + "expected to watch batchv1.Job, got", result.Object) + } + var brk bool + brk, err = changeFn(job) + if err != nil { + return fmt.Errorf("%w: %v", ErrICSenderJobFailed, err) + } + if brk { + return nil + } + } + } + return nil +} diff --git a/vendor/knative.dev/kn-plugin-event/pkg/k8s/kubeclient.go b/vendor/knative.dev/kn-plugin-event/pkg/k8s/kubeclient.go new file mode 100644 index 0000000000..69de830665 --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/pkg/k8s/kubeclient.go @@ -0,0 +1,141 @@ +package k8s + +import ( + "context" + "errors" + "fmt" + + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + + // see: https://github.com/kubernetes/client-go/issues/242 + _ "k8s.io/client-go/plugin/pkg/client/auth" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + eventingv1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1" + messagingv1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/messaging/v1" + "knative.dev/kn-plugin-event/pkg/event" + servingv1 "knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1" +) + +// ErrNoKubernetesConnection if can't connect to Kube API server. +var ErrNoKubernetesConnection = errors.New("no Kubernetes connection") + +// CreateKubeClient creates kubernetes.Interface. 
+func CreateKubeClient(props *event.Properties) (Clients, error) { + cc, err := loadClientConfig(props) + if err != nil { + return nil, err + } + restcfg := cc.Config + typed, err := kubernetes.NewForConfig(restcfg) + if err != nil { + return nil, fmt.Errorf("%w: %v", ErrUnexcpected, err) + } + dyn, err := dynamic.NewForConfig(restcfg) + if err != nil { + return nil, fmt.Errorf("%w: %v", ErrUnexcpected, err) + } + servingclient, err := servingv1.NewForConfig(restcfg) + if err != nil { + return nil, fmt.Errorf("%w: %v", ErrUnexcpected, err) + } + eventingclient, err := eventingv1.NewForConfig(restcfg) + if err != nil { + return nil, fmt.Errorf("%w: %v", ErrUnexcpected, err) + } + messagingclient, err := messagingv1.NewForConfig(restcfg) + if err != nil { + return nil, fmt.Errorf("%w: %v", ErrUnexcpected, err) + } + return &clients{ + ctx: context.Background(), + namespace: cc.namespace, + typed: typed, + dynamic: dyn, + serving: servingclient, + eventing: eventingclient, + messaging: messagingclient, + }, nil +} + +// Clients holds available Kubernetes clients. +type Clients interface { + Namespace() string + Typed() kubernetes.Interface + Dynamic() dynamic.Interface + Context() context.Context + Serving() servingv1.ServingV1Interface + Eventing() eventingv1.EventingV1Interface + Messaging() messagingv1.MessagingV1Interface +} + +func loadClientConfig(props *event.Properties) (clientConfig, error) { + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + var configOverrides *clientcmd.ConfigOverrides + if props.Context != "" && props.Cluster != "" { + configOverrides = &clientcmd.ConfigOverrides{} + if props.Context != "" { + configOverrides.CurrentContext = props.Context + } + if props.Cluster != "" { + configOverrides.Context.Cluster = props.Cluster + } + } + if len(props.Path) > 0 { + loadingRules.ExplicitPath = props.Path + } + cc := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides) + cfg, err := cc.ClientConfig() + if err != nil { + return clientConfig{}, fmt.Errorf("%w: %v", ErrNoKubernetesConnection, err) + } + ns, _, err := cc.Namespace() + if err != nil { + return clientConfig{}, fmt.Errorf("%w: %v", ErrNoKubernetesConnection, err) + } + return clientConfig{Config: cfg, namespace: ns}, nil +} + +type clientConfig struct { + *rest.Config + namespace string +} + +type clients struct { + namespace string + ctx context.Context + typed kubernetes.Interface + dynamic dynamic.Interface + serving servingv1.ServingV1Interface + eventing eventingv1.EventingV1Interface + messaging messagingv1.MessagingV1Interface +} + +func (c *clients) Typed() kubernetes.Interface { + return c.typed +} + +func (c *clients) Dynamic() dynamic.Interface { + return c.dynamic +} + +func (c *clients) Context() context.Context { + return c.ctx +} + +func (c *clients) Serving() servingv1.ServingV1Interface { + return c.serving +} + +func (c *clients) Eventing() eventingv1.EventingV1Interface { + return c.eventing +} + +func (c *clients) Messaging() messagingv1.MessagingV1Interface { + return c.messaging +} + +func (c *clients) Namespace() string { + return c.namespace +} diff --git a/vendor/knative.dev/kn-plugin-event/pkg/metadata/image.go b/vendor/knative.dev/kn-plugin-event/pkg/metadata/image.go new file mode 100644 index 0000000000..e1e74b0566 --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/pkg/metadata/image.go @@ -0,0 +1,32 @@ +package metadata + +import "fmt" + +var ( + // Image holds information about companion image reference. 
+	Image = "" //nolint:gochecknoglobals
+	// ImageBasename holds the basename of the image, so that a development
+	// reference can be built from it.
+	ImageBasename = "" //nolint:gochecknoglobals
+)
+
+// ResolveImage will try to resolve the image reference from set values. If
+// Image is given it will be used, otherwise the ImageBasename and Version will
+// be used.
+func ResolveImage() string {
+	//goland:noinspection GoBoolExpressions
+	if Image == "" {
+		return fmt.Sprintf("%s/kn-event-sender:%s", ImageBasename, Version)
+	}
+	return Image
+}
+
+// ImagePath returns the path to the image variable.
+func ImagePath() string {
+	return importPath("Image")
+}
+
+// ImageBasenamePath returns the path to the image basename variable.
+func ImageBasenamePath() string {
+	return importPath("ImageBasename")
+}
diff --git a/vendor/knative.dev/kn-plugin-event/pkg/metadata/names.go b/vendor/knative.dev/kn-plugin-event/pkg/metadata/names.go
new file mode 100644
index 0000000000..242647f8b0
--- /dev/null
+++ b/vendor/knative.dev/kn-plugin-event/pkg/metadata/names.go
@@ -0,0 +1,13 @@
+package metadata
+
+const (
+	// PluginName holds the name of the plugin.
+	PluginName = "kn-event"
+	// PluginUse holds the plugin's short name, intended to be used as "kn event".
+	PluginUse = "event"
+	// PluginDescription holds a short description of the plugin.
+	PluginDescription = "Manage CloudEvents from the command line"
+	// PluginLongDescription holds a long description of the plugin.
+	PluginLongDescription = `Manage CloudEvents from the command line. Easily perform tasks like sending,
+building, and parsing, all from the command line.`
+)
diff --git a/vendor/knative.dev/kn-plugin-event/pkg/metadata/reflect.go b/vendor/knative.dev/kn-plugin-event/pkg/metadata/reflect.go
new file mode 100644
index 0000000000..f54bb1d4f1
--- /dev/null
+++ b/vendor/knative.dev/kn-plugin-event/pkg/metadata/reflect.go
@@ -0,0 +1,18 @@
+package metadata
+
+import (
+	"fmt"
+	"reflect"
+)
+
+type marker struct{}
+
+func importPath(variable string) string {
+	m := marker{}
+	p := findPackageForType(m)
+	return fmt.Sprintf("%s.%s", p, variable)
+}
+
+func findPackageForType(any interface{}) string {
+	return reflect.TypeOf(any).PkgPath()
+}
diff --git a/vendor/knative.dev/kn-plugin-event/pkg/metadata/version.go b/vendor/knative.dev/kn-plugin-event/pkg/metadata/version.go
new file mode 100644
index 0000000000..e5550a83cb
--- /dev/null
+++ b/vendor/knative.dev/kn-plugin-event/pkg/metadata/version.go
@@ -0,0 +1,9 @@
+package metadata
+
+// Version holds application version information.
+var Version = "0.0.0" //nolint:gochecknoglobals
+
+// VersionPath returns the path to the version variable.
+func VersionPath() string {
+	return importPath("Version")
+}
diff --git a/vendor/knative.dev/kn-plugin-event/pkg/plugin/plugin.go b/vendor/knative.dev/kn-plugin-event/pkg/plugin/plugin.go
new file mode 100644
index 0000000000..819ea9e458
--- /dev/null
+++ b/vendor/knative.dev/kn-plugin-event/pkg/plugin/plugin.go
@@ -0,0 +1,46 @@
+package plugin
+
+import (
+	"io"
+
+	knplugin "knative.dev/client/pkg/kn/plugin"
+	"knative.dev/kn-plugin-event/internal/cli/cmd"
+	"knative.dev/kn-plugin-event/pkg/metadata"
+)
+
+// init makes sure to register the plugin as an internal one upon import of
+// pkg/plugin, as knative cli plugins are expected to do.
+// nolint:gochecknoinits +func init() { + knplugin.InternalPlugins = append(knplugin.InternalPlugins, &plugin{}) +} + +type plugin struct { + io.Writer +} + +func (p plugin) Name() string { + return metadata.PluginName +} + +func (p plugin) Execute(args []string) error { + opts := []cmd.CommandOption{ + cmd.WithArgs(args...), + } + if p.Writer != nil { + opts = append(opts, cmd.WithOutput(p.Writer)) + } + return new(cmd.Cmd).ExecuteWithOptions(opts...) //nolint:wrapcheck +} + +func (p plugin) Description() (string, error) { + return metadata.PluginDescription, nil +} + +func (p plugin) CommandParts() []string { + return []string{metadata.PluginUse} +} + +func (p plugin) Path() string { + return "" +} diff --git a/vendor/knative.dev/kn-plugin-event/pkg/plugin/testing.go b/vendor/knative.dev/kn-plugin-event/pkg/plugin/testing.go new file mode 100644 index 0000000000..102c1dfda6 --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/pkg/plugin/testing.go @@ -0,0 +1,35 @@ +package plugin + +import ( + "bytes" + "io" + + knplugin "knative.dev/client/pkg/kn/plugin" +) + +// WithCapture captures the output from of a running plugin. +func WithCapture(fn func()) []byte { + var buf bytes.Buffer + WithOutput(&buf, fn) + return buf.Bytes() +} + +// WithOutput executes provided function after configuring the stdout and stderr. +func WithOutput(output io.Writer, fn func()) { + pl := findPlugin() + save := pl.Writer + pl.Writer = output + defer func() { + pl.Writer = save + }() + fn() +} + +func findPlugin() *plugin { + for _, ip := range knplugin.InternalPlugins { + if pl, ok := ip.(*plugin); ok { + return pl + } + } + panic("this should never happen") +} diff --git a/vendor/knative.dev/kn-plugin-event/pkg/sender/create.go b/vendor/knative.dev/kn-plugin-event/pkg/sender/create.go new file mode 100644 index 0000000000..fd2e6186ae --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/pkg/sender/create.go @@ -0,0 +1,38 @@ +package sender + +import ( + "errors" + "fmt" + + "knative.dev/kn-plugin-event/pkg/event" +) + +// New creates a new Sender. +func (b *Binding) New(target *event.Target) (event.Sender, error) { + switch target.Type { + case event.TargetTypeReachable: + return &directSender{ + url: *target.URLVal, + }, nil + case event.TargetTypeAddressable: + kube, err := b.CreateKubeClients(target.Properties) + if err != nil { + return nil, err + } + jr := b.CreateJobRunner(kube) + ar := b.CreateAddressResolver(kube) + return &inClusterSender{ + addressable: target.AddressableVal, + jobRunner: jr, + addressResolver: ar, + }, nil + } + return nil, fmt.Errorf("%w: %v", ErrUnsupportedTargetType, target.Type) +} + +func cantSentEvent(err error) error { + if errors.Is(err, event.ErrCantSentEvent) { + return err + } + return fmt.Errorf("%w: %v", event.ErrCantSentEvent, err) +} diff --git a/vendor/knative.dev/kn-plugin-event/pkg/sender/direct.go b/vendor/knative.dev/kn-plugin-event/pkg/sender/direct.go new file mode 100644 index 0000000000..b591c9b807 --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/pkg/sender/direct.go @@ -0,0 +1,30 @@ +package sender + +import ( + "context" + "net/url" + + cloudevents "github.com/cloudevents/sdk-go/v2" +) + +type directSender struct { + url url.URL +} + +func (d *directSender) Send(ce cloudevents.Event) error { + c, err := cloudevents.NewClientHTTP() + if err != nil { + return cantSentEvent(err) + } + + // Set a target. + ctx := cloudevents.ContextWithTarget(context.TODO(), d.url.String()) + + // Send that Event. 
+ err = c.Send(ctx, ce) + if !cloudevents.IsACK(err) { + return cantSentEvent(err) + } + + return nil +} diff --git a/vendor/knative.dev/kn-plugin-event/pkg/sender/in_cluster.go b/vendor/knative.dev/kn-plugin-event/pkg/sender/in_cluster.go new file mode 100644 index 0000000000..3647da0325 --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/pkg/sender/in_cluster.go @@ -0,0 +1,65 @@ +package sender + +import ( + "fmt" + + cloudevents "github.com/cloudevents/sdk-go/v2" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/kn-plugin-event/pkg/cli/ics" + "knative.dev/kn-plugin-event/pkg/event" + "knative.dev/kn-plugin-event/pkg/k8s" + "knative.dev/kn-plugin-event/pkg/metadata" +) + +type inClusterSender struct { + addressable *event.AddressableSpec + jobRunner k8s.JobRunner + addressResolver k8s.ReferenceAddressResolver +} + +func (i *inClusterSender) Send(ce cloudevents.Event) error { + url, err := i.addressResolver.ResolveAddress( + i.addressable.Reference, i.addressable.URI, + ) + if err != nil { + return fmt.Errorf("%w: %v", k8s.ErrInvalidReference, err) + } + kevent, err := ics.Encode(ce) + if err != nil { + return fmt.Errorf("%w: %v", ics.ErrCouldntEncode, err) + } + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("kn-event-sender-%s", ce.ID()), + Namespace: i.addressable.SenderNamespace, + Labels: map[string]string{ + "event-id": ce.ID(), + }, + }, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Containers: []corev1.Container{{ + Name: "kn-event-sender", + Image: metadata.ResolveImage(), + Env: []corev1.EnvVar{{ + Name: "K_SINK", + Value: url.String(), + }, { + Name: "K_EVENT", + Value: kevent, + }}, + }}, + }, + }, + }, + } + err = i.jobRunner.Run(job) + if err != nil { + return fmt.Errorf("%w: %v", ics.ErrCantSendWithICS, err) + } + return nil +} diff --git a/vendor/knative.dev/kn-plugin-event/pkg/sender/namespace.go b/vendor/knative.dev/kn-plugin-event/pkg/sender/namespace.go new file mode 100644 index 0000000000..364bcf24cf --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/pkg/sender/namespace.go @@ -0,0 +1,13 @@ +package sender + +import "knative.dev/kn-plugin-event/pkg/event" + +// DefaultNamespace returns a default namespace of connected K8s cluster or +// error if such namespace can't be determined. +func (b *Binding) DefaultNamespace(props *event.Properties) (string, error) { + clients, err := b.CreateKubeClients(props) + if err != nil { + return "", err + } + return clients.Namespace(), nil +} diff --git a/vendor/knative.dev/kn-plugin-event/pkg/sender/types.go b/vendor/knative.dev/kn-plugin-event/pkg/sender/types.go new file mode 100644 index 0000000000..e87a5fe50a --- /dev/null +++ b/vendor/knative.dev/kn-plugin-event/pkg/sender/types.go @@ -0,0 +1,28 @@ +package sender + +import ( + "errors" + + "knative.dev/kn-plugin-event/pkg/event" + "knative.dev/kn-plugin-event/pkg/k8s" +) + +// ErrUnsupportedTargetType is an error if user pass unsupported event target +// type. Only supporting: reachable or addressable. +var ErrUnsupportedTargetType = errors.New("unsupported target type") + +// CreateKubeClients creates k8s.Clients. +type CreateKubeClients func(props *event.Properties) (k8s.Clients, error) + +// CreateJobRunner creates a k8s.JobRunner. +type CreateJobRunner func(kube k8s.Clients) k8s.JobRunner + +// CreateAddressResolver creates a k8s.ReferenceAddressResolver. 
+type CreateAddressResolver func(kube k8s.Clients) k8s.ReferenceAddressResolver
+
+// Binding holds injectable dependencies.
+type Binding struct {
+	CreateJobRunner
+	CreateAddressResolver
+	CreateKubeClients
+}
diff --git a/vendor/knative.dev/serving/pkg/apis/serving/fieldmask.go b/vendor/knative.dev/serving/pkg/apis/serving/fieldmask.go
index 772cd764fc..4ea939b298 100644
--- a/vendor/knative.dev/serving/pkg/apis/serving/fieldmask.go
+++ b/vendor/knative.dev/serving/pkg/apis/serving/fieldmask.go
@@ -366,6 +366,7 @@ func HTTPGetActionMask(in *corev1.HTTPGetAction) *corev1.HTTPGetAction {
 	out.Path = in.Path
 	out.Scheme = in.Scheme
 	out.HTTPHeaders = in.HTTPHeaders
+	out.Port = in.Port
 
 	return out
 }
@@ -381,6 +382,7 @@ func TCPSocketActionMask(in *corev1.TCPSocketAction) *corev1.TCPSocketAction {
 
 	// Allowed fields
 	out.Host = in.Host
+	out.Port = in.Port
 
 	return out
 }
diff --git a/vendor/knative.dev/serving/pkg/apis/serving/k8s_validation.go b/vendor/knative.dev/serving/pkg/apis/serving/k8s_validation.go
index 3c737c3555..583f3387cd 100644
--- a/vendor/knative.dev/serving/pkg/apis/serving/k8s_validation.go
+++ b/vendor/knative.dev/serving/pkg/apis/serving/k8s_validation.go
@@ -324,14 +324,19 @@ func ValidatePodSpec(ctx context.Context, ps corev1.PodSpec) *apis.FieldError {
 		errs = errs.Also(err.ViaField("volumes"))
 	}
 
+	port, err := validateContainersPorts(ps.Containers)
+	if err != nil {
+		errs = errs.Also(err.ViaField("containers[*]"))
+	}
+
 	switch len(ps.Containers) {
 	case 0:
 		errs = errs.Also(apis.ErrMissingField("containers"))
 	case 1:
-		errs = errs.Also(ValidateContainer(ctx, ps.Containers[0], volumes).
+		errs = errs.Also(ValidateContainer(ctx, ps.Containers[0], volumes, port).
 			ViaFieldIndex("containers", 0))
 	default:
-		errs = errs.Also(validateContainers(ctx, ps.Containers, volumes))
+		errs = errs.Also(validateContainers(ctx, ps.Containers, volumes, port))
 	}
 	if ps.ServiceAccountName != "" {
 		for _, err := range validation.IsDNS1123Subdomain(ps.ServiceAccountName) {
@@ -356,20 +361,20 @@ func validateInitContainers(ctx context.Context, containers []corev1.Container,
 	return errs
 }
-func validateContainers(ctx context.Context, containers []corev1.Container, volumes map[string]corev1.Volume) (errs *apis.FieldError) {
+func validateContainers(ctx context.Context, containers []corev1.Container, volumes map[string]corev1.Volume, port corev1.ContainerPort) (errs *apis.FieldError) {
 	features := config.FromContextOrDefaults(ctx).Features
 	if features.MultiContainer != config.Enabled {
 		return errs.Also(&apis.FieldError{Message: fmt.Sprintf("multi-container is off, "+
 			"but found %d containers", len(containers))})
 	}
-	errs = errs.Also(validateContainersPorts(containers).ViaField("containers"))
 	for i := range containers {
 		// Probes are not allowed on other than serving container,
 		// ref: http://bit.ly/probes-condition
 		if len(containers[i].Ports) == 0 {
+			// Note, if we allow readiness/liveness checks on sidecars, we should pass in an *empty* port here, not the main container's port.
 			errs = errs.Also(validateSidecarContainer(WithinSidecarContainer(ctx), containers[i], volumes).ViaFieldIndex("containers", i))
 		} else {
-			errs = errs.Also(ValidateContainer(WithinUserContainer(ctx), containers[i], volumes).ViaFieldIndex("containers", i))
+			errs = errs.Also(ValidateContainer(WithinUserContainer(ctx), containers[i], volumes, port).ViaFieldIndex("containers", i))
 		}
 	}
 	return errs
 }
@@ -386,21 +391,38 @@ func AllMountedVolumes(containers []corev1.Container) sets.String {
 	return volumeNames
 }
 
-// validateContainersPorts validates port when specified multiple containers
-func validateContainersPorts(containers []corev1.Container) *apis.FieldError {
+// validateContainersPorts validates port when specified multiple containers,
+// and returns the single serving port if error is nil
+func validateContainersPorts(containers []corev1.Container) (corev1.ContainerPort, *apis.FieldError) {
 	var count int
+	var port = corev1.ContainerPort{
+		Name:          "http",
+		ContainerPort: 8080,
+	}
 	for i := range containers {
-		count += len(containers[i].Ports)
+		if c := len(containers[i].Ports); c > 0 {
+			count += c
+			if containers[i].Ports[0].ContainerPort != 0 {
+				port.ContainerPort = containers[i].Ports[0].ContainerPort
+			}
+			if containers[i].Ports[0].Name != "" {
+				port.Name = containers[i].Ports[0].Name
+			}
+		}
 	}
 	// When no container ports are specified.
-	if count == 0 {
-		return apis.ErrMissingField("ports")
+	if count == 0 && len(containers) > 1 {
+		return port, apis.ErrMissingField("ports")
 	}
 	// More than one container sections have ports.
 	if count > 1 {
-		return apis.ErrMultipleOneOf("ports")
+		return port, &apis.FieldError{
+			Message: "more than one container port is set",
+			Paths:   []string{"ports"},
+			Details: "Only a single port is allowed across all containers",
+		}
 	}
-	return nil
+	return port, nil
 }
 
 // validateSidecarContainer validate fields for non serving containers
@@ -447,27 +469,14 @@ func validateInitContainer(ctx context.Context, container corev1.Container, volu
 }
 
 // ValidateContainer validate fields for serving containers
-func ValidateContainer(ctx context.Context, container corev1.Container, volumes map[string]corev1.Volume) (errs *apis.FieldError) {
-	// Single container cannot have multiple ports
-	errs = errs.Also(portValidation(container.Ports).ViaField("ports"))
+func ValidateContainer(ctx context.Context, container corev1.Container, volumes map[string]corev1.Volume, port corev1.ContainerPort) (errs *apis.FieldError) {
 	// Liveness Probes
-	errs = errs.Also(validateProbe(container.LivenessProbe).ViaField("livenessProbe"))
+	errs = errs.Also(validateProbe(container.LivenessProbe, port).ViaField("livenessProbe"))
 	// Readiness Probes
-	errs = errs.Also(validateReadinessProbe(container.ReadinessProbe).ViaField("readinessProbe"))
+	errs = errs.Also(validateReadinessProbe(container.ReadinessProbe, port).ViaField("readinessProbe"))
 	return errs.Also(validate(ctx, container, volumes))
 }
 
-func portValidation(containerPorts []corev1.ContainerPort) *apis.FieldError {
-	if len(containerPorts) > 1 {
-		return &apis.FieldError{
-			Message: "More than one container port is set",
-			Paths:   []string{apis.CurrentField},
-			Details: "Only a single port is allowed",
-		}
-	}
-	return nil
-}
-
 func validate(ctx context.Context, container corev1.Container, volumes map[string]corev1.Volume) *apis.FieldError {
 	if equality.Semantic.DeepEqual(container, corev1.Container{}) {
 		return apis.ErrMissingField(apis.CurrentField)
@@ -653,12 +662,12 @@ func validateContainerPortBasic(port corev1.ContainerPort) *apis.FieldError {
 	return errs
 }
 
-func validateReadinessProbe(p *corev1.Probe) *apis.FieldError {
+func validateReadinessProbe(p *corev1.Probe, port corev1.ContainerPort) *apis.FieldError {
 	if p == nil {
 		return nil
 	}
 
-	errs := validateProbe(p)
+	errs := validateProbe(p, port)
 
 	if p.PeriodSeconds < 0 {
 		errs = errs.Also(apis.ErrOutOfBoundsValue(p.PeriodSeconds, 0, math.MaxInt32, "periodSeconds"))
@@ -700,7 +709,7 @@ func validateReadinessProbe(p *corev1.Probe) *apis.FieldError {
 	return errs
 }
 
-func validateProbe(p *corev1.Probe) *apis.FieldError {
+func validateProbe(p *corev1.Probe, port corev1.ContainerPort) *apis.FieldError {
 	if p == nil {
 		return nil
 	}
@@ -714,10 +723,18 @@ func validateProbe(p *corev1.Probe) *apis.FieldError {
 	if h.HTTPGet != nil {
 		handlers = append(handlers, "httpGet")
 		errs = errs.Also(apis.CheckDisallowedFields(*h.HTTPGet, *HTTPGetActionMask(h.HTTPGet))).ViaField("httpGet")
+		getPort := h.HTTPGet.Port
+		if (getPort.StrVal != "" && getPort.StrVal != port.Name) || (getPort.IntVal != 0 && getPort.IntVal != port.ContainerPort) {
+			errs = errs.Also(apis.ErrInvalidValue(getPort.String(), "httpGet.port", "May only probe containerPort"))
+		}
 	}
 	if h.TCPSocket != nil {
 		handlers = append(handlers, "tcpSocket")
 		errs = errs.Also(apis.CheckDisallowedFields(*h.TCPSocket, *TCPSocketActionMask(h.TCPSocket))).ViaField("tcpSocket")
+		tcpPort := h.TCPSocket.Port
+		if (tcpPort.StrVal != "" && tcpPort.StrVal != port.Name) || (tcpPort.IntVal != 0 && tcpPort.IntVal != port.ContainerPort) {
+			errs = errs.Also(apis.ErrInvalidValue(tcpPort.String(), "tcpSocket.port", "May only probe containerPort"))
+		}
 	}
 	if h.Exec != nil {
 		handlers = append(handlers, "exec")
diff --git a/vendor/knative.dev/serving/test/e2e-common.sh b/vendor/knative.dev/serving/test/e2e-common.sh
index d3641bc03d..7f564936c1 100644
--- a/vendor/knative.dev/serving/test/e2e-common.sh
+++ b/vendor/knative.dev/serving/test/e2e-common.sh
@@ -55,14 +55,22 @@ export SYSTEM_NAMESPACE="${SYSTEM_NAMESPACE:-$(uuidgen | tr 'A-Z' 'a-z')}"
 readonly REPLICAS=3
 readonly BUCKETS=10
 
+# Receives the latest serving version and searches for the same version with major and minor and searches for the latest patch
+function latest_net_istio_version() {
+  local serving_version=$1
+  local major_minor
+  major_minor=$(echo "$serving_version" | cut -d '.' -f 1,2)
+
+  curl -L --silent "https://api.github.com/repos/knative/net-istio/releases" | jq --arg major_minor "$major_minor" -r '[.[].tag_name] | map(select(. | startswith($major_minor))) | sort_by( sub("knative-";"") | sub("v";"") | split(".") | map(tonumber) ) | reverse[0]'
+}
+
 # Latest serving release. If user does not supply this as a flag, the latest
 # tagged release on the current branch will be used.
 LATEST_SERVING_RELEASE_VERSION=$(latest_version)
 
 # Latest net-istio release.
-LATEST_NET_ISTIO_RELEASE_VERSION=$(
-  curl -L --silent "https://api.github.com/repos/knative/net-istio/releases" | grep '"tag_name"' \
-  | cut -f2 -d: | sed "s/[^v0-9.]//g" | sort | tail -n1)
+LATEST_NET_ISTIO_RELEASE_VERSION=$(latest_net_istio_version "$LATEST_SERVING_RELEASE_VERSION")
+
 
 # Parse our custom flags.
 function parse_flags() {
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 5bb9283a50..6b0b526fad 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,14 +1,27 @@
-# cloud.google.com/go v0.97.0
+# cloud.google.com/go v0.98.0
 cloud.google.com/go/compute/metadata
 # contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d
 contrib.go.opencensus.io/exporter/ocagent
 # contrib.go.opencensus.io/exporter/prometheus v0.4.0
 contrib.go.opencensus.io/exporter/prometheus
+# github.com/Azure/go-autorest v14.2.0+incompatible
+github.com/Azure/go-autorest
+# github.com/Azure/go-autorest/autorest v0.11.17
+github.com/Azure/go-autorest/autorest
+github.com/Azure/go-autorest/autorest/azure
+# github.com/Azure/go-autorest/autorest/adal v0.9.10
+github.com/Azure/go-autorest/autorest/adal
+# github.com/Azure/go-autorest/autorest/date v0.3.0
+github.com/Azure/go-autorest/autorest/date
+# github.com/Azure/go-autorest/logger v0.2.0
+github.com/Azure/go-autorest/logger
+# github.com/Azure/go-autorest/tracing v0.6.0
+github.com/Azure/go-autorest/tracing
 # github.com/PuerkitoBio/purell v1.1.1
 github.com/PuerkitoBio/purell
 # github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578
 github.com/PuerkitoBio/urlesc
-# github.com/Shopify/sarama v1.29.1
+# github.com/Shopify/sarama v1.30.0
 github.com/Shopify/sarama
 # github.com/beorn7/perks v1.0.1
 github.com/beorn7/perks/quantile
@@ -38,7 +51,7 @@ github.com/cloudevents/sdk-go/v2/event/datacodec/xml
 github.com/cloudevents/sdk-go/v2/protocol
 github.com/cloudevents/sdk-go/v2/protocol/http
 github.com/cloudevents/sdk-go/v2/types
-# github.com/cpuguy83/go-md2man/v2 v2.0.0
+# github.com/cpuguy83/go-md2man/v2 v2.0.1
 github.com/cpuguy83/go-md2man/v2/md2man
 # github.com/davecgh/go-spew v1.1.1
 github.com/davecgh/go-spew/spew
@@ -53,10 +66,14 @@ github.com/emicklei/go-restful
 github.com/emicklei/go-restful/log
 # github.com/evanphx/json-patch v4.9.0+incompatible
 github.com/evanphx/json-patch
-# github.com/evanphx/json-patch/v5 v5.5.0
+# github.com/evanphx/json-patch/v5 v5.6.0
 github.com/evanphx/json-patch/v5
-# github.com/fsnotify/fsnotify v1.4.9
+# github.com/form3tech-oss/jwt-go v3.2.2+incompatible
+github.com/form3tech-oss/jwt-go
+# github.com/fsnotify/fsnotify v1.5.1
 github.com/fsnotify/fsnotify
+# github.com/ghodss/yaml v1.0.0
+github.com/ghodss/yaml
 # github.com/go-errors/errors v1.0.1
 github.com/go-errors/errors
 # github.com/go-kit/log v0.1.0
@@ -64,7 +81,7 @@ github.com/go-kit/log
 github.com/go-kit/log/level
 # github.com/go-logfmt/logfmt v0.5.0
 github.com/go-logfmt/logfmt
-# github.com/go-logr/logr v0.4.0
+# github.com/go-logr/logr v1.2.2
 github.com/go-logr/logr
 # github.com/go-openapi/jsonpointer v0.19.5
 github.com/go-openapi/jsonpointer
@@ -89,7 +106,7 @@ github.com/golang/protobuf/ptypes/any
 github.com/golang/protobuf/ptypes/duration
 github.com/golang/protobuf/ptypes/timestamp
 github.com/golang/protobuf/ptypes/wrappers
-# github.com/golang/snappy v0.0.3
+# github.com/golang/snappy v0.0.4
 github.com/golang/snappy
 # github.com/google/btree v1.0.0
 github.com/google/btree
@@ -138,7 +155,7 @@ github.com/hashicorp/hcl/hcl/token
 github.com/hashicorp/hcl/json/parser
 github.com/hashicorp/hcl/json/scanner
 github.com/hashicorp/hcl/json/token
-# github.com/imdario/mergo v0.3.11
+# github.com/imdario/mergo v0.3.12
 github.com/imdario/mergo
 # github.com/inconshreveable/mousetrap v1.0.0
 github.com/inconshreveable/mousetrap
@@ -189,9 +206,11 @@ github.com/josharian/intern
 github.com/json-iterator/go
 # github.com/kelseyhightower/envconfig v1.4.0
 github.com/kelseyhightower/envconfig
-# github.com/klauspost/compress v1.13.0
+# github.com/klauspost/compress v1.13.6
+github.com/klauspost/compress
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/huff0
+github.com/klauspost/compress/internal/snapref
 github.com/klauspost/compress/zstd
 github.com/klauspost/compress/zstd/internal/xxhash
 # github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de
@@ -216,7 +235,7 @@ github.com/maximilien/kn-source-pkg/test/e2e
 # github.com/mitchellh/go-homedir v1.1.0
 ## explicit
 github.com/mitchellh/go-homedir
-# github.com/mitchellh/mapstructure v1.4.1
+# github.com/mitchellh/mapstructure v1.4.2
 github.com/mitchellh/mapstructure
 # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
 github.com/modern-go/concurrent
@@ -224,13 +243,13 @@ github.com/modern-go/concurrent
 github.com/modern-go/reflect2
 # github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00
 github.com/monochromegane/go-gitignore
-# github.com/openzipkin/zipkin-go v0.2.5
+# github.com/openzipkin/zipkin-go v0.3.0
 github.com/openzipkin/zipkin-go/model
-# github.com/pelletier/go-toml v1.9.3
+# github.com/pelletier/go-toml v1.9.4
 github.com/pelletier/go-toml
 # github.com/peterbourgon/diskv v2.0.1+incompatible
 github.com/peterbourgon/diskv
-# github.com/pierrec/lz4 v2.6.0+incompatible
+# github.com/pierrec/lz4 v2.6.1+incompatible
 github.com/pierrec/lz4
 github.com/pierrec/lz4/internal/xxh32
 # github.com/pkg/errors v0.9.1
@@ -243,7 +262,7 @@ github.com/prometheus/client_golang/prometheus/internal
 github.com/prometheus/client_golang/prometheus/promhttp
 # github.com/prometheus/client_model v0.2.0
 github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.31.1
+# github.com/prometheus/common v0.32.1
 github.com/prometheus/common/expfmt
 github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
 github.com/prometheus/common/model
@@ -267,7 +286,7 @@ github.com/russross/blackfriday/v2
 # github.com/spf13/afero v1.6.0
 github.com/spf13/afero
 github.com/spf13/afero/mem
-# github.com/spf13/cast v1.3.1
+# github.com/spf13/cast v1.4.1
 github.com/spf13/cast
 # github.com/spf13/cobra v1.2.1
 ## explicit
@@ -278,13 +297,22 @@ github.com/spf13/jwalterweatherman
 # github.com/spf13/pflag v1.0.5
 ## explicit
 github.com/spf13/pflag
-# github.com/spf13/viper v1.8.1
+# github.com/spf13/viper v1.9.0
 ## explicit
 github.com/spf13/viper
+github.com/spf13/viper/internal/encoding
+github.com/spf13/viper/internal/encoding/hcl
+github.com/spf13/viper/internal/encoding/json
+github.com/spf13/viper/internal/encoding/toml
+github.com/spf13/viper/internal/encoding/yaml
 # github.com/stretchr/testify v1.7.0
 github.com/stretchr/testify/assert
 # github.com/subosito/gotenv v1.2.0
 github.com/subosito/gotenv
+# github.com/thediveo/enumflag v0.10.0
+github.com/thediveo/enumflag
+# github.com/wavesoftware/go-ensure v1.0.0
+github.com/wavesoftware/go-ensure
 # github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca
 github.com/xlab/treeprint
 # go.opencensus.io v0.23.0
@@ -325,13 +353,16 @@ go.uber.org/zap/internal/bufferpool
 go.uber.org/zap/internal/color
 go.uber.org/zap/internal/exit
 go.uber.org/zap/zapcore
-# golang.org/x/crypto v0.0.0-20210817164053-32db794688a5
+# golang.org/x/crypto v0.0.0-20210920023735-84f357641f63
 golang.org/x/crypto/md4
 golang.org/x/crypto/pbkdf2
-# golang.org/x/mod v0.4.2
+golang.org/x/crypto/pkcs12
+golang.org/x/crypto/pkcs12/internal/rc2
+# golang.org/x/mod v0.5.1
+golang.org/x/mod/internal/lazyregexp
 golang.org/x/mod/module
 golang.org/x/mod/semver
-# golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f
+# golang.org/x/net v0.0.0-20211020060615-d418f374d309
 golang.org/x/net/context
 golang.org/x/net/context/ctxhttp
 golang.org/x/net/http/httpguts
@@ -343,7 +374,7 @@ golang.org/x/net/internal/socks
 golang.org/x/net/internal/timeseries
 golang.org/x/net/proxy
 golang.org/x/net/trace
-# golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1
+# golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
 golang.org/x/oauth2
 golang.org/x/oauth2/authhandler
 golang.org/x/oauth2/google
@@ -354,7 +385,7 @@ golang.org/x/oauth2/jwt
 # golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
 golang.org/x/sync/errgroup
 golang.org/x/sync/semaphore
-# golang.org/x/sys v0.0.0-20210917161153-d61c044b1678
+# golang.org/x/sys v0.0.0-20211124211545-fe61309f8881
 golang.org/x/sys/execabs
 golang.org/x/sys/internal/unsafeheader
 golang.org/x/sys/plan9
@@ -363,7 +394,7 @@ golang.org/x/sys/windows
 # golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d
 ## explicit
 golang.org/x/term
-# golang.org/x/text v0.3.6
+# golang.org/x/text v0.3.7
 golang.org/x/text/encoding
 golang.org/x/text/encoding/internal
 golang.org/x/text/encoding/internal/identifier
@@ -377,7 +408,7 @@ golang.org/x/text/unicode/norm
 golang.org/x/text/width
 # golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac
 golang.org/x/time/rate
-# golang.org/x/tools v0.1.7
+# golang.org/x/tools v0.1.8
 golang.org/x/tools/go/ast/astutil
 golang.org/x/tools/imports
 golang.org/x/tools/internal/event
@@ -394,7 +425,7 @@ golang.org/x/xerrors
 golang.org/x/xerrors/internal
 # gomodules.xyz/jsonpatch/v2 v2.2.0
 gomodules.xyz/jsonpatch/v2
-# google.golang.org/api v0.58.0
+# google.golang.org/api v0.61.0
 google.golang.org/api/support/bundler
 # google.golang.org/appengine v1.6.7
 google.golang.org/appengine
@@ -407,11 +438,11 @@ google.golang.org/appengine/internal/modules
 google.golang.org/appengine/internal/remote_api
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20211016002631-37fc39342514
+# google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12
 google.golang.org/genproto/googleapis/api/httpbody
 google.golang.org/genproto/googleapis/rpc/status
 google.golang.org/genproto/protobuf/field_mask
-# google.golang.org/grpc v1.41.0
+# google.golang.org/grpc v1.42.0
 google.golang.org/grpc
 google.golang.org/grpc/attributes
 google.golang.org/grpc/backoff
@@ -494,7 +525,7 @@ google.golang.org/protobuf/types/known/timestamppb
 google.golang.org/protobuf/types/known/wrapperspb
 # gopkg.in/inf.v0 v0.9.1
 gopkg.in/inf.v0
-# gopkg.in/ini.v1 v1.62.0
+# gopkg.in/ini.v1 v1.63.2
 gopkg.in/ini.v1
 # gopkg.in/yaml.v2 v2.4.0
 gopkg.in/yaml.v2
@@ -560,7 +591,7 @@ k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1
 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme
 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1
-# k8s.io/apimachinery v0.21.4
+# k8s.io/apimachinery v0.22.3 => k8s.io/apimachinery v0.21.4
 ## explicit
 k8s.io/apimachinery/pkg/api/apitesting/fuzzer
 k8s.io/apimachinery/pkg/api/equality
@@ -714,9 +745,12 @@ k8s.io/client-go/pkg/apis/clientauthentication
 k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1
 k8s.io/client-go/pkg/apis/clientauthentication/v1beta1
 k8s.io/client-go/pkg/version
+k8s.io/client-go/plugin/pkg/client/auth
+k8s.io/client-go/plugin/pkg/client/auth/azure
 k8s.io/client-go/plugin/pkg/client/auth/exec
 k8s.io/client-go/plugin/pkg/client/auth/gcp
 k8s.io/client-go/plugin/pkg/client/auth/oidc
+k8s.io/client-go/plugin/pkg/client/auth/openstack
 k8s.io/client-go/rest
 k8s.io/client-go/rest/watch
 k8s.io/client-go/restmapper
@@ -792,7 +826,7 @@ k8s.io/gengo/parser
 k8s.io/gengo/types
 # k8s.io/klog v1.0.0
 k8s.io/klog
-# k8s.io/klog/v2 v2.8.0
+# k8s.io/klog/v2 v2.40.1
 k8s.io/klog/v2
 # k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7
 k8s.io/kube-openapi/cmd/openapi-gen/args
@@ -806,7 +840,7 @@ k8s.io/utils/buffer
 k8s.io/utils/integer
 k8s.io/utils/pointer
 k8s.io/utils/trace
-# knative.dev/eventing v0.27.0
+# knative.dev/eventing v0.27.2
 ## explicit
 knative.dev/eventing/pkg/apis/config
 knative.dev/eventing/pkg/apis/duck
@@ -846,9 +880,21 @@ knative.dev/eventing-kafka/pkg/client/clientset/versioned/scheme
 knative.dev/eventing-kafka/pkg/client/clientset/versioned/typed/sources/v1beta1
 knative.dev/eventing-kafka/pkg/client/clientset/versioned/typed/sources/v1beta1/fake
 knative.dev/eventing-kafka/pkg/common/constants
-# knative.dev/hack v0.0.0-20211101195839-11d193bf617b
+# knative.dev/hack v0.0.0-20211122163517-fe1340f21191
 ## explicit
 knative.dev/hack
+# knative.dev/kn-plugin-event v0.27.1 => github.com/openshift-knative/kn-plugin-event v0.27.1-0.20220223114256-af13ecf492aa
+## explicit
+knative.dev/kn-plugin-event/internal/cli/cmd
+knative.dev/kn-plugin-event/pkg/cli
+knative.dev/kn-plugin-event/pkg/cli/ics
+knative.dev/kn-plugin-event/pkg/cli/retcode
+knative.dev/kn-plugin-event/pkg/configuration
+knative.dev/kn-plugin-event/pkg/event
+knative.dev/kn-plugin-event/pkg/k8s
+knative.dev/kn-plugin-event/pkg/metadata
+knative.dev/kn-plugin-event/pkg/plugin
+knative.dev/kn-plugin-event/pkg/sender
 # knative.dev/kn-plugin-source-kafka v0.27.0
 ## explicit
 knative.dev/kn-plugin-source-kafka/pkg/client
@@ -916,7 +962,7 @@ knative.dev/pkg/tracing/propagation
 knative.dev/pkg/tracing/propagation/tracecontextb3
 knative.dev/pkg/tracker
 knative.dev/pkg/webhook/resourcesemantics
-# knative.dev/serving v0.27.0
+# knative.dev/serving v0.27.1
 ## explicit
 knative.dev/serving/pkg/apis/autoscaling
 knative.dev/serving/pkg/apis/autoscaling/v1alpha1
@@ -1031,3 +1077,5 @@ sigs.k8s.io/structured-merge-diff/v4/value
 # sigs.k8s.io/yaml v1.3.0
 ## explicit
 sigs.k8s.io/yaml
+# k8s.io/apimachinery => k8s.io/apimachinery v0.21.4
+# knative.dev/kn-plugin-event => github.com/openshift-knative/kn-plugin-event v0.27.1-0.20220223114256-af13ecf492aa
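
The vendored knative.dev/serving bump to v0.27.1 above tightens container-port validation: validateContainersPorts now returns the single serving port (defaulting to name "http" and containerPort 8080 when none is declared), at most one port may be set across all containers, and httpGet/tcpSocket probes may only target that port, which is why HTTPGetActionMask and TCPSocketActionMask now pass Port through. A minimal, self-contained Go sketch of that rule follows; the helper names servingPort and probeTargetsServingPort are illustrative only and are not part of the vendored API.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// servingPort picks the single serving port across all containers, falling
// back to name "http" / containerPort 8080 when none is declared, and rejects
// specs that declare more than one port in total. Illustrative only.
func servingPort(containers []corev1.Container) (corev1.ContainerPort, error) {
	port := corev1.ContainerPort{Name: "http", ContainerPort: 8080}
	count := 0
	for i := range containers {
		if len(containers[i].Ports) == 0 {
			continue
		}
		count += len(containers[i].Ports)
		if p := containers[i].Ports[0]; p.ContainerPort != 0 {
			port.ContainerPort = p.ContainerPort
		}
		if n := containers[i].Ports[0].Name; n != "" {
			port.Name = n
		}
	}
	if count > 1 {
		return port, fmt.Errorf("more than one container port is set; only a single port is allowed across all containers")
	}
	return port, nil
}

// probeTargetsServingPort reports whether an httpGet/tcpSocket probe port
// refers to the serving port, either by name or by number.
func probeTargetsServingPort(probePort intstr.IntOrString, port corev1.ContainerPort) bool {
	if probePort.StrVal != "" && probePort.StrVal != port.Name {
		return false
	}
	if probePort.IntVal != 0 && probePort.IntVal != port.ContainerPort {
		return false
	}
	return true
}

func main() {
	containers := []corev1.Container{
		{Name: "user-container", Ports: []corev1.ContainerPort{{Name: "h2c", ContainerPort: 8080}}},
		{Name: "sidecar"}, // sidecars declare no ports
	}
	port, err := servingPort(containers)
	fmt.Println(port.Name, port.ContainerPort, err) // h2c 8080 <nil>

	fmt.Println(probeTargetsServingPort(intstr.FromInt(9090), port))     // false: not the serving port
	fmt.Println(probeTargetsServingPort(intstr.FromString("h2c"), port)) // true: matches by name
}

This mirrors how a probe aimed at anything other than the declared serving port is now rejected with "May only probe containerPort".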
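
The new latest_net_istio_version helper in test/e2e-common.sh pins the net-istio release to the same major.minor as the serving release instead of taking the newest tag overall. A rough Go sketch of the same selection idea, for illustration only: the "knative-" prefix handling loosely mirrors the jq sub("knative-";"") step, and the tag list in main is made up.

package main

import (
	"fmt"
	"strings"

	"golang.org/x/mod/semver"
)

// latestMatching returns the newest tag that shares major.minor with the
// given serving release. Stripping a "knative-" prefix is an assumption about
// possible tag shapes, not something taken from the vendored script.
func latestMatching(servingVersion string, tags []string) string {
	want := semver.MajorMinor(servingVersion) // e.g. "v0.27"
	best := ""
	for _, tag := range tags {
		v := strings.TrimPrefix(tag, "knative-")
		if !semver.IsValid(v) || semver.MajorMinor(v) != want {
			continue
		}
		if best == "" || semver.Compare(v, best) > 0 {
			best = v
		}
	}
	return best
}

func main() {
	tags := []string{"v0.26.0", "v0.27.0", "v0.27.1", "v1.0.0"}
	fmt.Println(latestMatching("v0.27.1", tags)) // v0.27.1
}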