From 9f507abe8ddc6ea52bbed52dede7c293c327ec99 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 22 Dec 2024 03:31:11 +0000 Subject: [PATCH] build(deps): bump github.com/mholt/archiver/v4 Bumps [github.com/mholt/archiver/v4](https://github.com/mholt/archiver) from 4.0.0-alpha.8 to 4.0.0-alpha.9. - [Release notes](https://github.com/mholt/archiver/releases) - [Changelog](https://github.com/mholt/archiver/blob/master/.goreleaser.yml) - [Commits](https://github.com/mholt/archiver/compare/v4.0.0-alpha.8...v4.0.0-alpha.9) --- updated-dependencies: - dependency-name: github.com/mholt/archiver/v4 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 26 +- go.sum | 60 +- vendor/github.com/STARRY-S/zip/.gitignore | 16 + vendor/github.com/STARRY-S/zip/LICENSE | 28 + vendor/github.com/STARRY-S/zip/README.md | 43 + vendor/github.com/STARRY-S/zip/reader.go | 979 ++++++++++++++++++ vendor/github.com/STARRY-S/zip/register.go | 147 +++ vendor/github.com/STARRY-S/zip/struct.go | 419 ++++++++ vendor/github.com/STARRY-S/zip/updater.go | 537 ++++++++++ vendor/github.com/STARRY-S/zip/writer.go | 634 ++++++++++++ .../github.com/andybalholm/brotli/README.md | 7 + .../andybalholm/brotli/bitwriter.go | 56 + .../andybalholm/brotli/brotli_bit_stream.go | 311 +++++- .../brotli/compress_fragment_two_pass.go | 51 +- .../github.com/andybalholm/brotli/decode.go | 17 +- .../github.com/andybalholm/brotli/encoder.go | 177 ++++ .../brotli/entropy_encode_static.go | 5 + vendor/github.com/andybalholm/brotli/http.go | 10 +- .../andybalholm/brotli/matchfinder/emitter.go | 34 + .../andybalholm/brotli/matchfinder/m0.go | 169 +++ .../andybalholm/brotli/matchfinder/m4.go | 308 ++++++ .../brotli/matchfinder/matchfinder.go | 103 ++ .../brotli/matchfinder/textencoder.go | 53 + .../github.com/andybalholm/brotli/reader.go | 8 +- vendor/github.com/andybalholm/brotli/state.go | 1 - .../github.com/andybalholm/brotli/writer.go | 43 + .../github.com/bodgit/plumbing/.golangci.yaml | 1 + .../bodgit/plumbing/.goreleaser.yml | 2 + vendor/github.com/bodgit/plumbing/README.md | 2 +- vendor/github.com/bodgit/plumbing/fill.go | 21 + vendor/github.com/bodgit/plumbing/padded.go | 4 +- vendor/github.com/bodgit/plumbing/zero.go | 18 + .../github.com/bodgit/sevenzip/.golangci.yaml | 110 +- .../bodgit/sevenzip/.pre-commit-config.yaml | 18 + .../sevenzip/.release-please-manifest.json | 3 + .../github.com/bodgit/sevenzip/CHANGELOG.md | 39 + vendor/github.com/bodgit/sevenzip/LICENSE | 1 - vendor/github.com/bodgit/sevenzip/README.md | 108 +- .../bodgit/sevenzip/internal/aes7z/key.go | 42 +- .../bodgit/sevenzip/internal/aes7z/reader.go | 39 +- .../bodgit/sevenzip/internal/bcj2/reader.go | 4 +- .../bodgit/sevenzip/internal/bra/arm.go | 55 + .../bodgit/sevenzip/internal/bra/bcj.go | 104 ++ .../bodgit/sevenzip/internal/bra/bra.go | 6 + .../sevenzip/internal/bra/minmax_compat.go | 21 + .../bodgit/sevenzip/internal/bra/ppc.go | 48 + .../bodgit/sevenzip/internal/bra/reader.go | 58 ++ .../bodgit/sevenzip/internal/bra/sparc.go | 53 + .../bodgit/sevenzip/internal/lzma/reader.go | 34 +- .../bodgit/sevenzip/internal/pool/pool.go | 4 +- .../bodgit/sevenzip/internal/zstd/reader.go | 1 + vendor/github.com/bodgit/sevenzip/reader.go | 183 +++- vendor/github.com/bodgit/sevenzip/register.go | 9 + .../sevenzip/release-please-config.json | 6 + vendor/github.com/bodgit/sevenzip/struct.go | 51 +- vendor/github.com/bodgit/sevenzip/types.go | 82 +- 
.../github.com/bodgit/windows/.golangci.yaml | 13 + .../{sevenzip => windows}/.goreleaser.yml | 2 + vendor/github.com/bodgit/windows/.travis.yml | 9 - vendor/github.com/bodgit/windows/README.md | 9 +- vendor/github.com/bodgit/windows/filetime.go | 2 + vendor/github.com/connesc/cipherio/.gitignore | 1 - vendor/github.com/connesc/cipherio/LICENSE | 21 - vendor/github.com/connesc/cipherio/README.md | 14 - vendor/github.com/connesc/cipherio/doc.go | 12 - vendor/github.com/connesc/cipherio/padding.go | 54 - vendor/github.com/connesc/cipherio/reader.go | 178 ---- vendor/github.com/connesc/cipherio/writer.go | 151 --- vendor/github.com/dsnet/compress/bzip2/bwt.go | 1 + .../github.com/dsnet/compress/bzip2/common.go | 2 + .../dsnet/compress/bzip2/fuzz_off.go | 1 + .../dsnet/compress/bzip2/fuzz_on.go | 1 + .../dsnet/compress/bzip2/mtf_rle2.go | 2 + .../github.com/dsnet/compress/bzip2/prefix.go | 3 +- .../github.com/dsnet/compress/bzip2/rle1.go | 2 + .../dsnet/compress/internal/debug.go | 1 + .../dsnet/compress/internal/errors/errors.go | 2 +- .../dsnet/compress/internal/gofuzz.go | 1 + .../dsnet/compress/internal/prefix/debug.go | 1 + .../dsnet/compress/internal/prefix/prefix.go | 4 +- .../dsnet/compress/internal/prefix/range.go | 1 + .../dsnet/compress/internal/release.go | 1 + .../hashicorp/golang-lru/v2/.gitignore | 23 + .../hashicorp/golang-lru/v2/.golangci.yml | 46 + .../github.com/hashicorp/golang-lru/v2/2q.go | 267 +++++ .../hashicorp/golang-lru/v2/LICENSE | 364 +++++++ .../hashicorp/golang-lru/v2/README.md | 79 ++ .../github.com/hashicorp/golang-lru/v2/doc.go | 24 + .../hashicorp/golang-lru/v2/internal/list.go | 142 +++ .../github.com/hashicorp/golang-lru/v2/lru.go | 250 +++++ .../golang-lru/v2/simplelru/LICENSE_list | 29 + .../hashicorp/golang-lru/v2/simplelru/lru.go | 177 ++++ .../golang-lru/v2/simplelru/lru_interface.go | 46 + .../klauspost/compress/.goreleaser.yml | 6 +- .../github.com/klauspost/compress/README.md | 29 +- .../klauspost/compress/flate/deflate.go | 2 +- .../klauspost/compress/flate/inflate.go | 74 +- .../klauspost/compress/flate/matchlen_amd64.s | 10 +- .../klauspost/compress/fse/decompress.go | 2 +- .../klauspost/compress/huff0/decompress.go | 4 +- .../compress/internal/godebug/godebug.go | 44 + .../klauspost/compress/zip/reader.go | 274 +++-- .../klauspost/compress/zip/register.go | 6 +- .../klauspost/compress/zip/struct.go | 97 +- .../klauspost/compress/zip/writer.go | 84 +- .../klauspost/compress/zlib/reader.go | 32 +- .../klauspost/compress/zlib/writer.go | 18 +- .../klauspost/compress/zstd/blockdec.go | 4 +- .../klauspost/compress/zstd/dict.go | 31 + .../klauspost/compress/zstd/enc_better.go | 32 +- .../klauspost/compress/zstd/enc_dfast.go | 16 +- .../klauspost/compress/zstd/encoder.go | 45 +- .../klauspost/compress/zstd/framedec.go | 4 +- .../zstd/internal/xxhash/xxhash_arm64.s | 4 +- .../klauspost/compress/zstd/matchlen_amd64.s | 10 +- .../klauspost/compress/zstd/seqdec_amd64.go | 4 +- .../klauspost/compress/zstd/seqdec_amd64.s | 8 +- .../klauspost/compress/zstd/zstd.go | 4 + vendor/github.com/mholt/archiver/v4/7z.go | 33 +- vendor/github.com/mholt/archiver/v4/README.md | 25 +- .../github.com/mholt/archiver/v4/archiver.go | 66 +- vendor/github.com/mholt/archiver/v4/brotli.go | 19 +- vendor/github.com/mholt/archiver/v4/bz2.go | 7 +- .../github.com/mholt/archiver/v4/formats.go | 203 ++-- vendor/github.com/mholt/archiver/v4/fs.go | 852 ++++++++------- vendor/github.com/mholt/archiver/v4/gz.go | 28 +- .../mholt/archiver/v4/interfaces.go | 37 +- 
vendor/github.com/mholt/archiver/v4/lz4.go | 7 +- vendor/github.com/mholt/archiver/v4/lzip.go | 54 + vendor/github.com/mholt/archiver/v4/rar.go | 34 +- vendor/github.com/mholt/archiver/v4/sz.go | 7 +- vendor/github.com/mholt/archiver/v4/tar.go | 46 +- vendor/github.com/mholt/archiver/v4/xz.go | 7 +- vendor/github.com/mholt/archiver/v4/zip.go | 115 +- vendor/github.com/mholt/archiver/v4/zlib.go | 37 +- vendor/github.com/mholt/archiver/v4/zstd.go | 7 +- .../nwaples/rardecode/v2/archive.go | 12 +- .../nwaples/rardecode/v2/archive15.go | 41 +- .../nwaples/rardecode/v2/archive50.go | 36 +- .../nwaples/rardecode/v2/bit_reader.go | 4 +- .../nwaples/rardecode/v2/decode20.go | 5 +- .../nwaples/rardecode/v2/decode20_lz.go | 12 +- .../nwaples/rardecode/v2/decode29.go | 16 +- .../nwaples/rardecode/v2/decode29_lz.go | 12 +- .../nwaples/rardecode/v2/decode29_ppm.go | 9 + .../nwaples/rardecode/v2/decode50.go | 12 +- .../nwaples/rardecode/v2/decode_reader.go | 18 +- .../nwaples/rardecode/v2/filters.go | 2 +- .../nwaples/rardecode/v2/huffman.go | 8 +- .../nwaples/rardecode/v2/ppm_model.go | 10 +- .../github.com/nwaples/rardecode/v2/reader.go | 24 +- vendor/github.com/nwaples/rardecode/v2/vm.go | 4 +- .../github.com/nwaples/rardecode/v2/volume.go | 22 +- vendor/github.com/pierrec/lz4/v4/README.md | 2 +- .../pierrec/lz4/v4/compressing_reader.go | 222 ++++ .../pierrec/lz4/v4/internal/lz4block/block.go | 9 +- .../lz4/v4/internal/lz4block/blocks.go | 5 +- .../lz4/v4/internal/lz4block/decode_arm64.s | 15 +- .../lz4/v4/internal/lz4block/decode_other.go | 13 +- .../lz4/v4/internal/lz4stream/block.go | 4 +- .../lz4/v4/internal/xxh32/xxh32zero.go | 2 +- vendor/github.com/pierrec/lz4/v4/options.go | 28 + vendor/github.com/pierrec/lz4/v4/writer.go | 4 + .../sorairolake/lzip-go/.bumpversion.toml | 15 + .../github.com/sorairolake/lzip-go/.gitignore | 20 + .../sorairolake/lzip-go/.goreleaser.yaml | 53 + .../sorairolake/lzip-go/AUTHORS.adoc | 9 + .../sorairolake/lzip-go/CHANGELOG.adoc | 71 ++ .../sorairolake/lzip-go/CODE_OF_CONDUCT.md | 138 +++ .../sorairolake/lzip-go/CONTRIBUTING.adoc | 61 ++ vendor/github.com/sorairolake/lzip-go/LICENSE | 225 ++++ .../github.com/sorairolake/lzip-go/README.md | 119 +++ .../github.com/sorairolake/lzip-go/error.go | 107 ++ .../sorairolake/lzip-go/go.sum.license | 3 + .../github.com/sorairolake/lzip-go/justfile | 67 ++ vendor/github.com/sorairolake/lzip-go/lzip.go | 76 ++ .../github.com/sorairolake/lzip-go/reader.go | 124 +++ .../github.com/sorairolake/lzip-go/writer.go | 147 +++ vendor/go4.org/syncutil/gate.go | 41 + vendor/go4.org/syncutil/group.go | 64 ++ vendor/go4.org/syncutil/once.go | 60 ++ vendor/go4.org/syncutil/sem.go | 64 ++ vendor/go4.org/syncutil/syncutil.go | 18 + vendor/golang.org/x/text/LICENSE | 4 +- vendor/modules.txt | 48 +- 185 files changed, 10059 insertions(+), 1759 deletions(-) create mode 100644 vendor/github.com/STARRY-S/zip/.gitignore create mode 100644 vendor/github.com/STARRY-S/zip/LICENSE create mode 100644 vendor/github.com/STARRY-S/zip/README.md create mode 100644 vendor/github.com/STARRY-S/zip/reader.go create mode 100644 vendor/github.com/STARRY-S/zip/register.go create mode 100644 vendor/github.com/STARRY-S/zip/struct.go create mode 100644 vendor/github.com/STARRY-S/zip/updater.go create mode 100644 vendor/github.com/STARRY-S/zip/writer.go create mode 100644 vendor/github.com/andybalholm/brotli/bitwriter.go create mode 100644 vendor/github.com/andybalholm/brotli/encoder.go create mode 100644 vendor/github.com/andybalholm/brotli/matchfinder/emitter.go create 
mode 100644 vendor/github.com/andybalholm/brotli/matchfinder/m0.go create mode 100644 vendor/github.com/andybalholm/brotli/matchfinder/m4.go create mode 100644 vendor/github.com/andybalholm/brotli/matchfinder/matchfinder.go create mode 100644 vendor/github.com/andybalholm/brotli/matchfinder/textencoder.go create mode 100644 vendor/github.com/bodgit/plumbing/fill.go create mode 100644 vendor/github.com/bodgit/plumbing/zero.go create mode 100644 vendor/github.com/bodgit/sevenzip/.pre-commit-config.yaml create mode 100644 vendor/github.com/bodgit/sevenzip/.release-please-manifest.json create mode 100644 vendor/github.com/bodgit/sevenzip/CHANGELOG.md create mode 100644 vendor/github.com/bodgit/sevenzip/internal/bra/arm.go create mode 100644 vendor/github.com/bodgit/sevenzip/internal/bra/bcj.go create mode 100644 vendor/github.com/bodgit/sevenzip/internal/bra/bra.go create mode 100644 vendor/github.com/bodgit/sevenzip/internal/bra/minmax_compat.go create mode 100644 vendor/github.com/bodgit/sevenzip/internal/bra/ppc.go create mode 100644 vendor/github.com/bodgit/sevenzip/internal/bra/reader.go create mode 100644 vendor/github.com/bodgit/sevenzip/internal/bra/sparc.go create mode 100644 vendor/github.com/bodgit/sevenzip/release-please-config.json create mode 100644 vendor/github.com/bodgit/windows/.golangci.yaml rename vendor/github.com/bodgit/{sevenzip => windows}/.goreleaser.yml (63%) delete mode 100644 vendor/github.com/bodgit/windows/.travis.yml delete mode 100644 vendor/github.com/connesc/cipherio/.gitignore delete mode 100644 vendor/github.com/connesc/cipherio/LICENSE delete mode 100644 vendor/github.com/connesc/cipherio/README.md delete mode 100644 vendor/github.com/connesc/cipherio/doc.go delete mode 100644 vendor/github.com/connesc/cipherio/padding.go delete mode 100644 vendor/github.com/connesc/cipherio/reader.go delete mode 100644 vendor/github.com/connesc/cipherio/writer.go create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/.gitignore create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/.golangci.yml create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/2q.go create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/LICENSE create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/README.md create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/doc.go create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/internal/list.go create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/lru.go create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go create mode 100644 vendor/github.com/klauspost/compress/internal/godebug/godebug.go create mode 100644 vendor/github.com/mholt/archiver/v4/lzip.go create mode 100644 vendor/github.com/pierrec/lz4/v4/compressing_reader.go create mode 100644 vendor/github.com/sorairolake/lzip-go/.bumpversion.toml create mode 100644 vendor/github.com/sorairolake/lzip-go/.gitignore create mode 100644 vendor/github.com/sorairolake/lzip-go/.goreleaser.yaml create mode 100644 vendor/github.com/sorairolake/lzip-go/AUTHORS.adoc create mode 100644 vendor/github.com/sorairolake/lzip-go/CHANGELOG.adoc create mode 100644 vendor/github.com/sorairolake/lzip-go/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/sorairolake/lzip-go/CONTRIBUTING.adoc create mode 100644 vendor/github.com/sorairolake/lzip-go/LICENSE create mode 100644 
vendor/github.com/sorairolake/lzip-go/README.md create mode 100644 vendor/github.com/sorairolake/lzip-go/error.go create mode 100644 vendor/github.com/sorairolake/lzip-go/go.sum.license create mode 100644 vendor/github.com/sorairolake/lzip-go/justfile create mode 100644 vendor/github.com/sorairolake/lzip-go/lzip.go create mode 100644 vendor/github.com/sorairolake/lzip-go/reader.go create mode 100644 vendor/github.com/sorairolake/lzip-go/writer.go create mode 100644 vendor/go4.org/syncutil/gate.go create mode 100644 vendor/go4.org/syncutil/group.go create mode 100644 vendor/go4.org/syncutil/once.go create mode 100644 vendor/go4.org/syncutil/sem.go create mode 100644 vendor/go4.org/syncutil/syncutil.go diff --git a/go.mod b/go.mod index b7686113..842c2aa2 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/containerd/platforms v0.2.1 github.com/containers/image/v5 v5.31.1 github.com/docker/docker v27.3.1+incompatible - github.com/mholt/archiver/v4 v4.0.0-alpha.8 + github.com/mholt/archiver/v4 v4.0.0-alpha.9 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.0 github.com/pkg/errors v0.9.1 @@ -24,14 +24,14 @@ require ( github.com/BurntSushi/toml v1.3.2 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/Microsoft/hcsshim v0.12.3 // indirect + github.com/STARRY-S/zip v0.1.0 // indirect github.com/VividCortex/ewma v1.2.0 // indirect github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect - github.com/andybalholm/brotli v1.0.4 // indirect + github.com/andybalholm/brotli v1.1.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/bodgit/plumbing v1.2.0 // indirect - github.com/bodgit/sevenzip v1.3.0 // indirect - github.com/bodgit/windows v1.0.0 // indirect - github.com/connesc/cipherio v0.2.1 // indirect + github.com/bodgit/plumbing v1.3.0 // indirect + github.com/bodgit/sevenzip v1.5.2 // indirect + github.com/bodgit/windows v1.0.1 // indirect github.com/containerd/cgroups/v3 v3.0.3 // indirect github.com/containerd/errdefs v0.1.0 // indirect github.com/containerd/log v0.1.0 // indirect @@ -47,7 +47,7 @@ require ( github.com/docker/docker-credential-helpers v0.8.1 // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect + github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-jose/go-jose/v3 v3.0.3 // indirect github.com/go-logr/logr v1.4.1 // indirect @@ -72,9 +72,10 @@ require ( github.com/gorilla/mux v1.8.1 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.8 // indirect + github.com/klauspost/compress v1.17.11 // indirect github.com/klauspost/pgzip v1.2.6 // indirect github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -90,12 +91,12 @@ require ( github.com/moby/sys/user v0.1.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/nwaples/rardecode/v2 v2.0.0-beta.2 // indirect + github.com/nwaples/rardecode/v2 v2.0.0-beta.4 
// indirect github.com/oklog/ulid v1.3.1 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/opencontainers/selinux v1.11.0 // indirect github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect - github.com/pierrec/lz4/v4 v4.1.15 // indirect + github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/proglottis/gpgme v0.1.3 // indirect github.com/rivo/uniseg v0.4.7 // indirect @@ -103,6 +104,7 @@ require ( github.com/sigstore/fulcio v1.4.5 // indirect github.com/sigstore/rekor v1.3.6 // indirect github.com/sigstore/sigstore v1.8.3 // indirect + github.com/sorairolake/lzip-go v0.3.5 // indirect github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect github.com/sylabs/sif/v2 v2.16.0 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect @@ -119,12 +121,12 @@ require ( go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect - go4.org v0.0.0-20200411211856-f5505b9728dd // indirect + go4.org v0.0.0-20230225012048-214862532bf5 // indirect golang.org/x/crypto v0.23.0 // indirect golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect golang.org/x/net v0.25.0 // indirect golang.org/x/term v0.20.0 // indirect - golang.org/x/text v0.15.0 // indirect + golang.org/x/text v0.19.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect google.golang.org/grpc v1.62.1 // indirect google.golang.org/protobuf v1.33.0 // indirect diff --git a/go.sum b/go.sum index 5fa43fe5..4139fca7 100644 --- a/go.sum +++ b/go.sum @@ -29,6 +29,8 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Microsoft/hcsshim v0.12.3 h1:LS9NXqXhMoqNCplK1ApmVSfB4UnVLRDWRapB6EIlxE0= github.com/Microsoft/hcsshim v0.12.3/go.mod h1:Iyl1WVpZzr+UkzjekHZbV8o5Z9ZkxNGx6CtY2Qg/JVQ= +github.com/STARRY-S/zip v0.1.0 h1:eUER3jKmHKXjv+iy3BekLa+QnNSo1Lqz4eTzYBcGDqo= +github.com/STARRY-S/zip v0.1.0/go.mod h1:qj/mTZkvb3AvfGQ2e775/3AODRvB4peSw8KNMvrM8/I= github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= @@ -39,18 +41,18 @@ github.com/alecthomas/kong v1.6.0 h1:mwOzbdMR7uv2vul9J0FU3GYxE7ls/iX1ieMg5WIM6gE github.com/alecthomas/kong v1.6.0/go.mod h1:p2vqieVMeTAnaC83txKtXe8FLke2X07aruPWXyMPQrU= github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc= github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= -github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= -github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= +github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= +github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= 
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bodgit/plumbing v1.2.0 h1:gg4haxoKphLjml+tgnecR4yLBV5zo4HAZGCtAh3xCzM= -github.com/bodgit/plumbing v1.2.0/go.mod h1:b9TeRi7Hvc6Y05rjm8VML3+47n4XTZPtQ/5ghqic2n8= -github.com/bodgit/sevenzip v1.3.0 h1:1ljgELgtHqvgIp8W8kgeEGHIWP4ch3xGI8uOBZgLVKY= -github.com/bodgit/sevenzip v1.3.0/go.mod h1:omwNcgZTEooWM8gA/IJ2Nk/+ZQ94+GsytRzOJJ8FBlM= -github.com/bodgit/windows v1.0.0 h1:rLQ/XjsleZvx4fR1tB/UxQrK+SJ2OFHzfPjLWWOhDIA= -github.com/bodgit/windows v1.0.0/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM= +github.com/bodgit/plumbing v1.3.0 h1:pf9Itz1JOQgn7vEOE7v7nlEfBykYqvUYioC61TwWCFU= +github.com/bodgit/plumbing v1.3.0/go.mod h1:JOTb4XiRu5xfnmdnDJo6GmSbSbtSyufrsyZFByMtKEs= +github.com/bodgit/sevenzip v1.5.2 h1:acMIYRaqoHAdeu9LhEGGjL9UzBD4RNf9z7+kWDNignI= +github.com/bodgit/sevenzip v1.5.2/go.mod h1:gTGzXA67Yko6/HLSD0iK4kWaWzPlPmLfDO73jTjSRqc= +github.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4= +github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -61,8 +63,6 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/connesc/cipherio v0.2.1 h1:FGtpTPMbKNNWByNrr9aEBtaJtXjqOzkIXNYJp6OEycw= -github.com/connesc/cipherio v0.2.1/go.mod h1:ukY0MWJDFnJEbXMQtOcn2VmTpRfzcTz4OoVrWGGJZcA= github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM= @@ -106,8 +106,8 @@ github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQ github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 h1:iFaUwBSo5Svw6L7HYpRu/0lE3e0BaElwnNO1qkNQxBY= -github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s= +github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 h1:2tV76y6Q9BB+NEBasnqvs7e49aEBFI8ejC89PSnWH+4= +github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s= github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -159,9 +159,6 @@ 
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -216,6 +213,8 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -230,8 +229,8 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= -github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= @@ -255,8 +254,8 @@ github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZ github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= -github.com/mholt/archiver/v4 v4.0.0-alpha.8 h1:tRGQuDVPh66WCOelqe6LIGh0gwmfwxUrSSDunscGsRM= -github.com/mholt/archiver/v4 v4.0.0-alpha.8/go.mod h1:5f7FUYGXdJWUjESffJaYR4R60VhnHxb2X3T1teMyv5A= +github.com/mholt/archiver/v4 v4.0.0-alpha.9 h1:EZgAsW6DsuawxDgTtIdjCUBa2TQ6AOe9pnCidofSRtE= +github.com/mholt/archiver/v4 v4.0.0-alpha.9/go.mod h1:5D3uct315OMkMRXKwEuMB+wQi/2m5NQngKDmApqwVlo= github.com/miekg/pkcs11 v1.1.1 
h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU= @@ -278,8 +277,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/nwaples/rardecode/v2 v2.0.0-beta.2 h1:e3mzJFJs4k83GXBEiTaQ5HgSc/kOK8q0rDaRO0MPaOk= -github.com/nwaples/rardecode/v2 v2.0.0-beta.2/go.mod h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY= +github.com/nwaples/rardecode/v2 v2.0.0-beta.4 h1:sdiJxQdPjECn2lh9nLFFhgLCf+0ulDU5rODbtERTlUY= +github.com/nwaples/rardecode/v2 v2.0.0-beta.4/go.mod h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -292,8 +291,8 @@ github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaL github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= -github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0= -github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= +github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -334,6 +333,8 @@ github.com/sigstore/sigstore v1.8.3 h1:G7LVXqL+ekgYtYdksBks9B38dPoIsbscjQJX/MGWk github.com/sigstore/sigstore v1.8.3/go.mod h1:mqbTEariiGA94cn6G3xnDiV6BD8eSLdL/eA7bvJ0fVs= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sorairolake/lzip-go v0.3.5 h1:ms5Xri9o1JBIWvOFAorYtUNik6HI3HgBTkISiqu0Cwg= +github.com/sorairolake/lzip-go v0.3.5/go.mod h1:N0KYq5iWrMXI0ZEXKXaS9hCyOjZUQdBDEIbXfoUwbdk= github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw= github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -370,6 +371,8 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHo github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= 
+github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= +github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= @@ -399,8 +402,8 @@ go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= -go4.org v0.0.0-20200411211856-f5505b9728dd h1:BNJlw5kRTzdmyfh5U8F93HA2OwkP7ZGwA51eJ/0wKOU= -go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg= +go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc= +go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -453,13 +456,13 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= @@ -491,7 +494,6 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -523,8 +525,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= diff --git a/vendor/github.com/STARRY-S/zip/.gitignore b/vendor/github.com/STARRY-S/zip/.gitignore new file mode 100644 index 00000000..39bfd01d --- /dev/null +++ b/vendor/github.com/STARRY-S/zip/.gitignore @@ -0,0 +1,16 @@ +# Generated files +*.out +*.converted +*.txt +!NOTICE.txt +tmp* + +# Test archive files +*.zip + +# VSCode config +/.vscode/ + +# macOS trash +.DS_Store +._.DS_Store diff --git a/vendor/github.com/STARRY-S/zip/LICENSE b/vendor/github.com/STARRY-S/zip/LICENSE new file mode 100644 index 00000000..7bb02ab2 --- /dev/null +++ b/vendor/github.com/STARRY-S/zip/LICENSE @@ -0,0 +1,28 @@ +BSD 3-Clause License + +Copyright (c) 2023, Starry + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/STARRY-S/zip/README.md b/vendor/github.com/STARRY-S/zip/README.md new file mode 100644 index 00000000..edeb3745 --- /dev/null +++ b/vendor/github.com/STARRY-S/zip/README.md @@ -0,0 +1,43 @@ +Go zip library +============== + +This project is based on the [archive/zip](https://github.com/golang/go/tree/master/src/archive/zip) Go standard library; it adds an `Updater` struct that allows appending new files to an existing zip archive without decompressing the whole file. + +Usage +----- + +```go +import "github.com/STARRY-S/zip" +``` + +```go +// Open an existing test.zip archive in read/write mode for Updater. +f, err := os.OpenFile("test.zip", os.O_RDWR, 0) +handleErr(err) +fi, err := f.Stat() +handleErr(err) +zu, err := zip.NewUpdater(f, fi.Size()) +handleErr(err) +defer zu.Close() + +// Updater supports modifying the zip comment. +err = zu.SetComment("Test update zip archive") +handleErr(err) + +// Append a new file to the existing archive. +// The Append method will create a new io.Writer. +w, err := zu.Append("example.txt") +handleErr(err) +// Write data to the writer. +_, err = w.Write([]byte("hello world")) +handleErr(err) +``` + +The complete example test code: [example_updater_test.go](./example_updater_test.go). + +License +------- + +[BSD 3-Clause](LICENSE) + +The zip library is based on the [Go standard library](https://github.com/golang/go). diff --git a/vendor/github.com/STARRY-S/zip/reader.go b/vendor/github.com/STARRY-S/zip/reader.go new file mode 100644 index 00000000..01078b2a --- /dev/null +++ b/vendor/github.com/STARRY-S/zip/reader.go @@ -0,0 +1,979 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package zip + +import ( + "bufio" + "encoding/binary" + "errors" + "hash" + "hash/crc32" + + // "internal/godebug" + "io" + "io/fs" + "os" + "path" + "sort" + "strings" + "sync" + "time" +) + +// var zipinsecurepath = godebug.New("zipinsecurepath") + +var ( + ErrFormat = errors.New("zip: not a valid zip file") + ErrAlgorithm = errors.New("zip: unsupported compression algorithm") + ErrChecksum = errors.New("zip: checksum error") + ErrInsecurePath = errors.New("zip: insecure file path") +) + +// A Reader serves content from a ZIP archive. +type Reader struct { + r io.ReaderAt + File []*File + Comment string + decompressors map[uint16]Decompressor + + // Some JAR files are zip files with a prefix that is a bash script. + // The baseOffset field is the start of the zip file proper. + baseOffset int64 + + // fileList is a list of files sorted by ename, + // for use by the Open method. + fileListOnce sync.Once + fileList []fileListEntry +} + +// A ReadCloser is a Reader that must be closed when no longer needed. +type ReadCloser struct { + f *os.File + Reader +} + +// A File is a single file in a ZIP archive. +// The file information is in the embedded FileHeader. +// The file content can be accessed by calling Open. +type File struct { + FileHeader + zip *Reader + zipr io.ReaderAt + headerOffset int64 // includes overall ZIP archive baseOffset + zip64 bool // zip64 extended information extra field presence +} + +// OpenReader will open the Zip file specified by name and return a ReadCloser.
+// +// If any file inside the archive uses a non-local name +// (as defined by [filepath.IsLocal]) or a name containing backslashes +// and the GODEBUG environment variable contains `zipinsecurepath=0`, +// OpenReader returns the reader with an ErrInsecurePath error. +// A future version of Go may introduce this behavior by default. +// Programs that want to accept non-local names can ignore +// the ErrInsecurePath error and use the returned reader. +func OpenReader(name string) (*ReadCloser, error) { + f, err := os.Open(name) + if err != nil { + return nil, err + } + fi, err := f.Stat() + if err != nil { + f.Close() + return nil, err + } + r := new(ReadCloser) + if err = r.init(f, fi.Size()); err != nil && err != ErrInsecurePath { + f.Close() + return nil, err + } + r.f = f + return r, err +} + +// NewReader returns a new Reader reading from r, which is assumed to +// have the given size in bytes. +// +// If any file inside the archive uses a non-local name +// (as defined by [filepath.IsLocal]) or a name containing backslashes +// and the GODEBUG environment variable contains `zipinsecurepath=0`, +// NewReader returns the reader with an ErrInsecurePath error. +// A future version of Go may introduce this behavior by default. +// Programs that want to accept non-local names can ignore +// the ErrInsecurePath error and use the returned reader. +func NewReader(r io.ReaderAt, size int64) (*Reader, error) { + if size < 0 { + return nil, errors.New("zip: size cannot be negative") + } + zr := new(Reader) + var err error + if err = zr.init(r, size); err != nil && err != ErrInsecurePath { + return nil, err + } + return zr, err +} + +func (r *Reader) init(rdr io.ReaderAt, size int64) error { + end, baseOffset, err := readDirectoryEnd(rdr, size) + if err != nil { + return err + } + r.r = rdr + r.baseOffset = baseOffset + // Since the number of directory records is not validated, it is not + // safe to preallocate r.File without first checking that the specified + // number of files is reasonable, since a malformed archive may + // indicate it contains up to 1 << 128 - 1 files. Since each file has a + // header which will be _at least_ 30 bytes we can safely preallocate + // if (data size / 30) >= end.directoryRecords. + if end.directorySize < uint64(size) && (uint64(size)-end.directorySize)/30 >= end.directoryRecords { + r.File = make([]*File, 0, end.directoryRecords) + } + r.Comment = end.comment + rs := io.NewSectionReader(rdr, 0, size) + if _, err = rs.Seek(r.baseOffset+int64(end.directoryOffset), io.SeekStart); err != nil { + return err + } + buf := bufio.NewReader(rs) + + // The count of files inside a zip is truncated to fit in a uint16. + // Gloss over this by reading headers until we encounter + // a bad one, and then only report an ErrFormat or UnexpectedEOF if + // the file count modulo 65536 is incorrect. + for { + f := &File{zip: r, zipr: rdr} + err = readDirectoryHeader(f, buf) + if err == ErrFormat || err == io.ErrUnexpectedEOF { + break + } + if err != nil { + return err + } + f.headerOffset += r.baseOffset + r.File = append(r.File, f) + } + if uint16(len(r.File)) != uint16(end.directoryRecords) { // only compare 16 bits here + // Return the readDirectoryHeader error if we read + // the wrong number of directory entries. + return err + } + // if zipinsecurepath.Value() == "0" { + // for _, f := range r.File { + // if f.Name == "" { + // // Zip permits an empty file name field. 
+ // continue + // } + // // The zip specification states that names must use forward slashes, + // // so consider any backslashes in the name insecure. + // if !filepath.IsLocal(f.Name) || strings.Contains(f.Name, `\`) { + // zipinsecurepath.IncNonDefault() + // return ErrInsecurePath + // } + // } + // } + return nil +} + +// RegisterDecompressor registers or overrides a custom decompressor for a +// specific method ID. If a decompressor for a given method is not found, +// Reader will default to looking up the decompressor at the package level. +func (r *Reader) RegisterDecompressor(method uint16, dcomp Decompressor) { + if r.decompressors == nil { + r.decompressors = make(map[uint16]Decompressor) + } + r.decompressors[method] = dcomp +} + +func (r *Reader) decompressor(method uint16) Decompressor { + dcomp := r.decompressors[method] + if dcomp == nil { + dcomp = decompressor(method) + } + return dcomp +} + +// Close closes the Zip file, rendering it unusable for I/O. +func (rc *ReadCloser) Close() error { + return rc.f.Close() +} + +// DataOffset returns the offset of the file's possibly-compressed +// data, relative to the beginning of the zip file. +// +// Most callers should instead use Open, which transparently +// decompresses data and verifies checksums. +func (f *File) DataOffset() (offset int64, err error) { + bodyOffset, err := f.findBodyOffset() + if err != nil { + return + } + return f.headerOffset + bodyOffset, nil +} + +// Open returns a ReadCloser that provides access to the File's contents. +// Multiple files may be read concurrently. +func (f *File) Open() (io.ReadCloser, error) { + bodyOffset, err := f.findBodyOffset() + if err != nil { + return nil, err + } + if strings.HasSuffix(f.Name, "/") { + // The ZIP specification (APPNOTE.TXT) specifies that directories, which + // are technically zero-byte files, must not have any associated file + // data. We previously tried failing here if f.CompressedSize64 != 0, + // but it turns out that a number of implementations (namely, the Java + // jar tool) don't properly set the storage method on directories + // resulting in a file with compressed size > 0 but uncompressed size == + // 0. We still want to fail when a directory has associated uncompressed + // data, but we are tolerant of cases where the uncompressed size is + // zero but compressed size is not. + if f.UncompressedSize64 != 0 { + return &dirReader{ErrFormat}, nil + } else { + return &dirReader{io.EOF}, nil + } + } + size := int64(f.CompressedSize64) + r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset, size) + dcomp := f.zip.decompressor(f.Method) + if dcomp == nil { + return nil, ErrAlgorithm + } + var rc io.ReadCloser = dcomp(r) + var desr io.Reader + if f.hasDataDescriptor() { + desr = io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset+size, dataDescriptorLen) + } + rc = &checksumReader{ + rc: rc, + hash: crc32.NewIEEE(), + f: f, + desr: desr, + } + return rc, nil +} + +// OpenRaw returns a Reader that provides access to the File's contents without +// decompression. 
+func (f *File) OpenRaw() (io.Reader, error) { + bodyOffset, err := f.findBodyOffset() + if err != nil { + return nil, err + } + r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset, int64(f.CompressedSize64)) + return r, nil +} + +type dirReader struct { + err error +} + +func (r *dirReader) Read([]byte) (int, error) { + return 0, r.err +} + +func (r *dirReader) Close() error { + return nil +} + +type checksumReader struct { + rc io.ReadCloser + hash hash.Hash32 + nread uint64 // number of bytes read so far + f *File + desr io.Reader // if non-nil, where to read the data descriptor + err error // sticky error +} + +func (r *checksumReader) Stat() (fs.FileInfo, error) { + return headerFileInfo{&r.f.FileHeader}, nil +} + +func (r *checksumReader) Read(b []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + n, err = r.rc.Read(b) + r.hash.Write(b[:n]) + r.nread += uint64(n) + if r.nread > r.f.UncompressedSize64 { + return 0, ErrFormat + } + if err == nil { + return + } + if err == io.EOF { + if r.nread != r.f.UncompressedSize64 { + return 0, io.ErrUnexpectedEOF + } + if r.desr != nil { + if err1 := readDataDescriptor(r.desr, r.f); err1 != nil { + if err1 == io.EOF { + err = io.ErrUnexpectedEOF + } else { + err = err1 + } + } else if r.hash.Sum32() != r.f.CRC32 { + err = ErrChecksum + } + } else { + // If there's not a data descriptor, we still compare + // the CRC32 of what we've read against the file header + // or TOC's CRC32, if it seems like it was set. + if r.f.CRC32 != 0 && r.hash.Sum32() != r.f.CRC32 { + err = ErrChecksum + } + } + } + r.err = err + return +} + +func (r *checksumReader) Close() error { return r.rc.Close() } + +// findBodyOffset does the minimum work to verify the file has a header +// and returns the file body offset. +func (f *File) findBodyOffset() (int64, error) { + var buf [fileHeaderLen]byte + if _, err := f.zipr.ReadAt(buf[:], f.headerOffset); err != nil { + return 0, err + } + b := readBuf(buf[:]) + if sig := b.uint32(); sig != fileHeaderSignature { + return 0, ErrFormat + } + b = b[22:] // skip over most of the header + filenameLen := int(b.uint16()) + extraLen := int(b.uint16()) + return int64(fileHeaderLen + filenameLen + extraLen), nil +} + +// readDirectoryHeader attempts to read a directory header from r. +// It returns io.ErrUnexpectedEOF if it cannot read a complete header, +// and ErrFormat if it doesn't find a valid header signature. 
+func readDirectoryHeader(f *File, r io.Reader) error { + var buf [directoryHeaderLen]byte + if _, err := io.ReadFull(r, buf[:]); err != nil { + return err + } + b := readBuf(buf[:]) + if sig := b.uint32(); sig != directoryHeaderSignature { + return ErrFormat + } + f.CreatorVersion = b.uint16() + f.ReaderVersion = b.uint16() + f.Flags = b.uint16() + f.Method = b.uint16() + f.ModifiedTime = b.uint16() + f.ModifiedDate = b.uint16() + f.CRC32 = b.uint32() + f.CompressedSize = b.uint32() + f.UncompressedSize = b.uint32() + f.CompressedSize64 = uint64(f.CompressedSize) + f.UncompressedSize64 = uint64(f.UncompressedSize) + filenameLen := int(b.uint16()) + extraLen := int(b.uint16()) + commentLen := int(b.uint16()) + b = b[4:] // skipped start disk number and internal attributes (2x uint16) + f.ExternalAttrs = b.uint32() + f.headerOffset = int64(b.uint32()) + d := make([]byte, filenameLen+extraLen+commentLen) + if _, err := io.ReadFull(r, d); err != nil { + return err + } + f.Name = string(d[:filenameLen]) + f.Extra = d[filenameLen : filenameLen+extraLen] + f.Comment = string(d[filenameLen+extraLen:]) + + // Determine the character encoding. + utf8Valid1, utf8Require1 := detectUTF8(f.Name) + utf8Valid2, utf8Require2 := detectUTF8(f.Comment) + switch { + case !utf8Valid1 || !utf8Valid2: + // Name and Comment definitely not UTF-8. + f.NonUTF8 = true + case !utf8Require1 && !utf8Require2: + // Name and Comment use only single-byte runes that overlap with UTF-8. + f.NonUTF8 = false + default: + // Might be UTF-8, might be some other encoding; preserve existing flag. + // Some ZIP writers use UTF-8 encoding without setting the UTF-8 flag. + // Since it is impossible to always distinguish valid UTF-8 from some + // other encoding (e.g., GBK or Shift-JIS), we trust the flag. + f.NonUTF8 = f.Flags&0x800 == 0 + } + + needUSize := f.UncompressedSize == ^uint32(0) + needCSize := f.CompressedSize == ^uint32(0) + needHeaderOffset := f.headerOffset == int64(^uint32(0)) + + // Best effort to find what we need. + // Other zip authors might not even follow the basic format, + // and we'll just ignore the Extra content in that case. + var modified time.Time +parseExtras: + for extra := readBuf(f.Extra); len(extra) >= 4; { // need at least tag and size + fieldTag := extra.uint16() + fieldSize := int(extra.uint16()) + if len(extra) < fieldSize { + break + } + fieldBuf := extra.sub(fieldSize) + + switch fieldTag { + case zip64ExtraID: + f.zip64 = true + + // update directory values from the zip64 extra block. + // They should only be consulted if the sizes read earlier + // are maxed out. + // See golang.org/issue/13367. 
+ if needUSize { + needUSize = false + if len(fieldBuf) < 8 { + return ErrFormat + } + f.UncompressedSize64 = fieldBuf.uint64() + } + if needCSize { + needCSize = false + if len(fieldBuf) < 8 { + return ErrFormat + } + f.CompressedSize64 = fieldBuf.uint64() + } + if needHeaderOffset { + needHeaderOffset = false + if len(fieldBuf) < 8 { + return ErrFormat + } + f.headerOffset = int64(fieldBuf.uint64()) + } + case ntfsExtraID: + if len(fieldBuf) < 4 { + continue parseExtras + } + fieldBuf.uint32() // reserved (ignored) + for len(fieldBuf) >= 4 { // need at least tag and size + attrTag := fieldBuf.uint16() + attrSize := int(fieldBuf.uint16()) + if len(fieldBuf) < attrSize { + continue parseExtras + } + attrBuf := fieldBuf.sub(attrSize) + if attrTag != 1 || attrSize != 24 { + continue // Ignore irrelevant attributes + } + + const ticksPerSecond = 1e7 // Windows timestamp resolution + ts := int64(attrBuf.uint64()) // ModTime since Windows epoch + secs := int64(ts / ticksPerSecond) + nsecs := (1e9 / ticksPerSecond) * int64(ts%ticksPerSecond) + epoch := time.Date(1601, time.January, 1, 0, 0, 0, 0, time.UTC) + modified = time.Unix(epoch.Unix()+secs, nsecs) + } + case unixExtraID, infoZipUnixExtraID: + if len(fieldBuf) < 8 { + continue parseExtras + } + fieldBuf.uint32() // AcTime (ignored) + ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch + modified = time.Unix(ts, 0) + case extTimeExtraID: + if len(fieldBuf) < 5 || fieldBuf.uint8()&1 == 0 { + continue parseExtras + } + ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch + modified = time.Unix(ts, 0) + } + } + + msdosModified := msDosTimeToTime(f.ModifiedDate, f.ModifiedTime) + f.Modified = msdosModified + if !modified.IsZero() { + f.Modified = modified.UTC() + + // If legacy MS-DOS timestamps are set, we can use the delta between + // the legacy and extended versions to estimate timezone offset. + // + // A non-UTC timezone is always used (even if offset is zero). + // Thus, FileHeader.Modified.Location() == time.UTC is useful for + // determining whether extended timestamps are present. + // This is necessary for users that need to do additional time + // calculations when dealing with legacy ZIP formats. + if f.ModifiedTime != 0 || f.ModifiedDate != 0 { + f.Modified = modified.In(timeZone(msdosModified.Sub(modified))) + } + } + + // Assume that uncompressed size 2³²-1 could plausibly happen in + // an old zip32 file that was sharding inputs into the largest chunks + // possible (or is just malicious; search the web for 42.zip). + // If needUSize is true still, it means we didn't see a zip64 extension. + // As long as the compressed size is not also 2³²-1 (implausible) + // and the header is not also 2³²-1 (equally implausible), + // accept the uncompressed size 2³²-1 as valid. + // If nothing else, this keeps archive/zip working with 42.zip. + _ = needUSize + + if needCSize || needHeaderOffset { + return ErrFormat + } + + return nil +} + +func readDataDescriptor(r io.Reader, f *File) error { + var buf [dataDescriptorLen]byte + // The spec says: "Although not originally assigned a + // signature, the value 0x08074b50 has commonly been adopted + // as a signature value for the data descriptor record. + // Implementers should be aware that ZIP files may be + // encountered with or without this signature marking data + // descriptors and should account for either case when reading + // ZIP files to ensure compatibility." 
+	//
+	// dataDescriptorLen includes the size of the signature but
+	// first read just those 4 bytes to see if it exists.
+	if _, err := io.ReadFull(r, buf[:4]); err != nil {
+		return err
+	}
+	off := 0
+	maybeSig := readBuf(buf[:4])
+	if maybeSig.uint32() != dataDescriptorSignature {
+		// No data descriptor signature. Keep these four
+		// bytes.
+		off += 4
+	}
+	if _, err := io.ReadFull(r, buf[off:12]); err != nil {
+		return err
+	}
+	b := readBuf(buf[:12])
+	if b.uint32() != f.CRC32 {
+		return ErrChecksum
+	}
+
+	// The two sizes that follow here can be either 32 bits or 64 bits
+	// but the spec is not very clear on this and different
+	// interpretations have been made, causing incompatibilities. We
+	// already have the sizes from the central directory so we can
+	// just ignore these.
+
+	return nil
+}
+
+func readDirectoryEnd(r io.ReaderAt, size int64) (dir *directoryEnd, baseOffset int64, err error) {
+	// look for directoryEndSignature in the last 1k, then in the last 65k
+	var buf []byte
+	var directoryEndOffset int64
+	for i, bLen := range []int64{1024, 65 * 1024} {
+		if bLen > size {
+			bLen = size
+		}
+		buf = make([]byte, int(bLen))
+		if _, err := r.ReadAt(buf, size-bLen); err != nil && err != io.EOF {
+			return nil, 0, err
+		}
+		if p := findSignatureInBlock(buf); p >= 0 {
+			buf = buf[p:]
+			directoryEndOffset = size - bLen + int64(p)
+			break
+		}
+		if i == 1 || bLen == size {
+			return nil, 0, ErrFormat
+		}
+	}
+
+	// read header into struct
+	b := readBuf(buf[4:]) // skip signature
+	d := &directoryEnd{
+		diskNbr:            uint32(b.uint16()),
+		dirDiskNbr:         uint32(b.uint16()),
+		dirRecordsThisDisk: uint64(b.uint16()),
+		directoryRecords:   uint64(b.uint16()),
+		directorySize:      uint64(b.uint32()),
+		directoryOffset:    uint64(b.uint32()),
+		commentLen:         b.uint16(),
+	}
+	l := int(d.commentLen)
+	if l > len(b) {
+		return nil, 0, errors.New("zip: invalid comment length")
+	}
+	d.comment = string(b[:l])
+
+	// These values mean that the file can be a zip64 file
+	if d.directoryRecords == 0xffff || d.directorySize == 0xffff || d.directoryOffset == 0xffffffff {
+		p, err := findDirectory64End(r, directoryEndOffset)
+		if err == nil && p >= 0 {
+			directoryEndOffset = p
+			err = readDirectory64End(r, p, d)
+		}
+		if err != nil {
+			return nil, 0, err
+		}
+	}
+
+	maxInt64 := uint64(1<<63 - 1)
+	if d.directorySize > maxInt64 || d.directoryOffset > maxInt64 {
+		return nil, 0, ErrFormat
+	}
+
+	baseOffset = directoryEndOffset - int64(d.directorySize) - int64(d.directoryOffset)
+
+	// Make sure directoryOffset points to somewhere in our file.
+	if o := baseOffset + int64(d.directoryOffset); o < 0 || o >= size {
+		return nil, 0, ErrFormat
+	}
+
+	// If the directory end data tells us to use a non-zero baseOffset,
+	// but we would find a valid directory entry if we assume that the
+	// baseOffset is 0, then just use a baseOffset of 0.
+	// We've seen files in which the directory end data gives us
+	// an incorrect baseOffset.
+	if baseOffset > 0 {
+		off := int64(d.directoryOffset)
+		rs := io.NewSectionReader(r, off, size-off)
+		if readDirectoryHeader(&File{}, rs) == nil {
+			baseOffset = 0
+		}
+	}
+
+	return d, baseOffset, nil
+}
+
+// findDirectory64End tries to read the zip64 locator just before the
+// directory end and returns the offset of the zip64 directory end if
+// found.
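+// A missing locator or a mismatched signature is not treated as an error:
+// it simply means the archive is not zip64, so the function returns -1 with
+// a nil error.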
+func findDirectory64End(r io.ReaderAt, directoryEndOffset int64) (int64, error) {
+	locOffset := directoryEndOffset - directory64LocLen
+	if locOffset < 0 {
+		return -1, nil // no need to look for a header outside the file
+	}
+	buf := make([]byte, directory64LocLen)
+	if _, err := r.ReadAt(buf, locOffset); err != nil {
+		return -1, err
+	}
+	b := readBuf(buf)
+	if sig := b.uint32(); sig != directory64LocSignature {
+		return -1, nil
+	}
+	if b.uint32() != 0 { // number of the disk with the start of the zip64 end of central directory
+		return -1, nil // the file is not a valid zip64-file
+	}
+	p := b.uint64()      // relative offset of the zip64 end of central directory record
+	if b.uint32() != 1 { // total number of disks
+		return -1, nil // the file is not a valid zip64-file
+	}
+	return int64(p), nil
+}
+
+// readDirectory64End reads the zip64 directory end and updates the
+// directory end with the zip64 directory end values.
+func readDirectory64End(r io.ReaderAt, offset int64, d *directoryEnd) (err error) {
+	buf := make([]byte, directory64EndLen)
+	if _, err := r.ReadAt(buf, offset); err != nil {
+		return err
+	}
+
+	b := readBuf(buf)
+	if sig := b.uint32(); sig != directory64EndSignature {
+		return ErrFormat
+	}
+
+	b = b[12:]                        // skip dir size, version and version needed (uint64 + 2x uint16)
+	d.diskNbr = b.uint32()            // number of this disk
+	d.dirDiskNbr = b.uint32()         // number of the disk with the start of the central directory
+	d.dirRecordsThisDisk = b.uint64() // total number of entries in the central directory on this disk
+	d.directoryRecords = b.uint64()   // total number of entries in the central directory
+	d.directorySize = b.uint64()      // size of the central directory
+	d.directoryOffset = b.uint64()    // offset of start of central directory with respect to the starting disk number
+
+	return nil
+}
+
+func findSignatureInBlock(b []byte) int {
+	for i := len(b) - directoryEndLen; i >= 0; i-- {
+		// defined from directoryEndSignature in struct.go
+		if b[i] == 'P' && b[i+1] == 'K' && b[i+2] == 0x05 && b[i+3] == 0x06 {
+			// n is length of comment
+			n := int(b[i+directoryEndLen-2]) | int(b[i+directoryEndLen-1])<<8
+			if n+directoryEndLen+i <= len(b) {
+				return i
+			}
+		}
+	}
+	return -1
+}
+
+type readBuf []byte
+
+func (b *readBuf) uint8() uint8 {
+	v := (*b)[0]
+	*b = (*b)[1:]
+	return v
+}
+
+func (b *readBuf) uint16() uint16 {
+	v := binary.LittleEndian.Uint16(*b)
+	*b = (*b)[2:]
+	return v
+}
+
+func (b *readBuf) uint32() uint32 {
+	v := binary.LittleEndian.Uint32(*b)
+	*b = (*b)[4:]
+	return v
+}
+
+func (b *readBuf) uint64() uint64 {
+	v := binary.LittleEndian.Uint64(*b)
+	*b = (*b)[8:]
+	return v
+}
+
+func (b *readBuf) sub(n int) readBuf {
+	b2 := (*b)[:n]
+	*b = (*b)[n:]
+	return b2
+}
+
+// A fileListEntry is a File and its name.
+// If file == nil, the fileListEntry describes a directory without metadata.
+type fileListEntry struct {
+	name  string
+	file  *File
+	isDir bool
+	isDup bool
+}
+
+type fileInfoDirEntry interface {
+	fs.FileInfo
+	fs.DirEntry
+}
+
+func (f *fileListEntry) stat() (fileInfoDirEntry, error) {
+	if f.isDup {
+		return nil, errors.New(f.name + ": duplicate entries in zip file")
+	}
+	if !f.isDir {
+		return headerFileInfo{&f.file.FileHeader}, nil
+	}
+	return f, nil
+}
+
+// Only used for directories.
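+// They satisfy fs.FileInfo and fs.DirEntry for directory entries, including
+// entries synthesized from path prefixes that have no header of their own.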
+func (f *fileListEntry) Name() string { _, elem, _ := split(f.name); return elem } +func (f *fileListEntry) Size() int64 { return 0 } +func (f *fileListEntry) Mode() fs.FileMode { return fs.ModeDir | 0555 } +func (f *fileListEntry) Type() fs.FileMode { return fs.ModeDir } +func (f *fileListEntry) IsDir() bool { return true } +func (f *fileListEntry) Sys() any { return nil } + +func (f *fileListEntry) ModTime() time.Time { + if f.file == nil { + return time.Time{} + } + return f.file.FileHeader.Modified.UTC() +} + +func (f *fileListEntry) Info() (fs.FileInfo, error) { return f, nil } + +func (f *fileListEntry) String() string { + return fs.FormatDirEntry(f) +} + +// toValidName coerces name to be a valid name for fs.FS.Open. +func toValidName(name string) string { + name = strings.ReplaceAll(name, `\`, `/`) + p := path.Clean(name) + + p = strings.TrimPrefix(p, "/") + + for strings.HasPrefix(p, "../") { + p = p[len("../"):] + } + + return p +} + +func (r *Reader) initFileList() { + r.fileListOnce.Do(func() { + // files and knownDirs map from a file/directory name + // to an index into the r.fileList entry that we are + // building. They are used to mark duplicate entries. + files := make(map[string]int) + knownDirs := make(map[string]int) + + // dirs[name] is true if name is known to be a directory, + // because it appears as a prefix in a path. + dirs := make(map[string]bool) + + for _, file := range r.File { + isDir := len(file.Name) > 0 && file.Name[len(file.Name)-1] == '/' + name := toValidName(file.Name) + if name == "" { + continue + } + + if idx, ok := files[name]; ok { + r.fileList[idx].isDup = true + continue + } + if idx, ok := knownDirs[name]; ok { + r.fileList[idx].isDup = true + continue + } + + for dir := path.Dir(name); dir != "."; dir = path.Dir(dir) { + dirs[dir] = true + } + + idx := len(r.fileList) + entry := fileListEntry{ + name: name, + file: file, + isDir: isDir, + } + r.fileList = append(r.fileList, entry) + if isDir { + knownDirs[name] = idx + } else { + files[name] = idx + } + } + for dir := range dirs { + if _, ok := knownDirs[dir]; !ok { + if idx, ok := files[dir]; ok { + r.fileList[idx].isDup = true + } else { + entry := fileListEntry{ + name: dir, + file: nil, + isDir: true, + } + r.fileList = append(r.fileList, entry) + } + } + } + + sort.Slice(r.fileList, func(i, j int) bool { return fileEntryLess(r.fileList[i].name, r.fileList[j].name) }) + }) +} + +func fileEntryLess(x, y string) bool { + xdir, xelem, _ := split(x) + ydir, yelem, _ := split(y) + return xdir < ydir || xdir == ydir && xelem < yelem +} + +// Open opens the named file in the ZIP archive, +// using the semantics of fs.FS.Open: +// paths are always slash separated, with no +// leading / or ../ elements. 
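+//
+// A minimal usage sketch (the archive bytes and entry name are assumptions
+// for illustration, assuming NewReader mirrors the upstream archive/zip API):
+//
+//	zr, err := zip.NewReader(bytes.NewReader(data), int64(len(data)))
+//	if err != nil {
+//		return err
+//	}
+//	f, err := zr.Open("dir/hello.txt")
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()
+//	body, err := io.ReadAll(f)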
+func (r *Reader) Open(name string) (fs.File, error) { + r.initFileList() + + if !fs.ValidPath(name) { + return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrInvalid} + } + e := r.openLookup(name) + if e == nil { + return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist} + } + if e.isDir { + return &openDir{e, r.openReadDir(name), 0}, nil + } + rc, err := e.file.Open() + if err != nil { + return nil, err + } + return rc.(fs.File), nil +} + +func split(name string) (dir, elem string, isDir bool) { + if len(name) > 0 && name[len(name)-1] == '/' { + isDir = true + name = name[:len(name)-1] + } + i := len(name) - 1 + for i >= 0 && name[i] != '/' { + i-- + } + if i < 0 { + return ".", name, isDir + } + return name[:i], name[i+1:], isDir +} + +var dotFile = &fileListEntry{name: "./", isDir: true} + +func (r *Reader) openLookup(name string) *fileListEntry { + if name == "." { + return dotFile + } + + dir, elem, _ := split(name) + files := r.fileList + i := sort.Search(len(files), func(i int) bool { + idir, ielem, _ := split(files[i].name) + return idir > dir || idir == dir && ielem >= elem + }) + if i < len(files) { + fname := files[i].name + if fname == name || len(fname) == len(name)+1 && fname[len(name)] == '/' && fname[:len(name)] == name { + return &files[i] + } + } + return nil +} + +func (r *Reader) openReadDir(dir string) []fileListEntry { + files := r.fileList + i := sort.Search(len(files), func(i int) bool { + idir, _, _ := split(files[i].name) + return idir >= dir + }) + j := sort.Search(len(files), func(j int) bool { + jdir, _, _ := split(files[j].name) + return jdir > dir + }) + return files[i:j] +} + +type openDir struct { + e *fileListEntry + files []fileListEntry + offset int +} + +func (d *openDir) Close() error { return nil } +func (d *openDir) Stat() (fs.FileInfo, error) { return d.e.stat() } + +func (d *openDir) Read([]byte) (int, error) { + return 0, &fs.PathError{Op: "read", Path: d.e.name, Err: errors.New("is a directory")} +} + +func (d *openDir) ReadDir(count int) ([]fs.DirEntry, error) { + n := len(d.files) - d.offset + if count > 0 && n > count { + n = count + } + if n == 0 { + if count <= 0 { + return nil, nil + } + return nil, io.EOF + } + list := make([]fs.DirEntry, n) + for i := range list { + s, err := d.files[d.offset+i].stat() + if err != nil { + return nil, err + } + list[i] = s + } + d.offset += n + return list, nil +} diff --git a/vendor/github.com/STARRY-S/zip/register.go b/vendor/github.com/STARRY-S/zip/register.go new file mode 100644 index 00000000..43892462 --- /dev/null +++ b/vendor/github.com/STARRY-S/zip/register.go @@ -0,0 +1,147 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package zip + +import ( + "compress/flate" + "errors" + "io" + "sync" +) + +// A Compressor returns a new compressing writer, writing to w. +// The WriteCloser's Close method must be used to flush pending data to w. +// The Compressor itself must be safe to invoke from multiple goroutines +// simultaneously, but each returned writer will be used only by +// one goroutine at a time. +type Compressor func(w io.Writer) (io.WriteCloser, error) + +// A Decompressor returns a new decompressing reader, reading from r. +// The ReadCloser's Close method must be used to release associated resources. 
+// The Decompressor itself must be safe to invoke from multiple goroutines +// simultaneously, but each returned reader will be used only by +// one goroutine at a time. +type Decompressor func(r io.Reader) io.ReadCloser + +var flateWriterPool sync.Pool + +func newFlateWriter(w io.Writer) io.WriteCloser { + fw, ok := flateWriterPool.Get().(*flate.Writer) + if ok { + fw.Reset(w) + } else { + fw, _ = flate.NewWriter(w, 5) + } + return &pooledFlateWriter{fw: fw} +} + +type pooledFlateWriter struct { + mu sync.Mutex // guards Close and Write + fw *flate.Writer +} + +func (w *pooledFlateWriter) Write(p []byte) (n int, err error) { + w.mu.Lock() + defer w.mu.Unlock() + if w.fw == nil { + return 0, errors.New("Write after Close") + } + return w.fw.Write(p) +} + +func (w *pooledFlateWriter) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + var err error + if w.fw != nil { + err = w.fw.Close() + flateWriterPool.Put(w.fw) + w.fw = nil + } + return err +} + +var flateReaderPool sync.Pool + +func newFlateReader(r io.Reader) io.ReadCloser { + fr, ok := flateReaderPool.Get().(io.ReadCloser) + if ok { + fr.(flate.Resetter).Reset(r, nil) + } else { + fr = flate.NewReader(r) + } + return &pooledFlateReader{fr: fr} +} + +type pooledFlateReader struct { + mu sync.Mutex // guards Close and Read + fr io.ReadCloser +} + +func (r *pooledFlateReader) Read(p []byte) (n int, err error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.fr == nil { + return 0, errors.New("Read after Close") + } + return r.fr.Read(p) +} + +func (r *pooledFlateReader) Close() error { + r.mu.Lock() + defer r.mu.Unlock() + var err error + if r.fr != nil { + err = r.fr.Close() + flateReaderPool.Put(r.fr) + r.fr = nil + } + return err +} + +var ( + compressors sync.Map // map[uint16]Compressor + decompressors sync.Map // map[uint16]Decompressor +) + +func init() { + compressors.Store(Store, Compressor(func(w io.Writer) (io.WriteCloser, error) { return &nopCloser{w}, nil })) + compressors.Store(Deflate, Compressor(func(w io.Writer) (io.WriteCloser, error) { return newFlateWriter(w), nil })) + + decompressors.Store(Store, Decompressor(io.NopCloser)) + decompressors.Store(Deflate, Decompressor(newFlateReader)) +} + +// RegisterDecompressor allows custom decompressors for a specified method ID. +// The common methods Store and Deflate are built in. +func RegisterDecompressor(method uint16, dcomp Decompressor) { + if _, dup := decompressors.LoadOrStore(method, dcomp); dup { + panic("decompressor already registered") + } +} + +// RegisterCompressor registers custom compressors for a specified method ID. +// The common methods Store and Deflate are built in. +func RegisterCompressor(method uint16, comp Compressor) { + if _, dup := compressors.LoadOrStore(method, comp); dup { + panic("compressor already registered") + } +} + +func compressor(method uint16) Compressor { + ci, ok := compressors.Load(method) + if !ok { + return nil + } + return ci.(Compressor) +} + +func decompressor(method uint16) Decompressor { + di, ok := decompressors.Load(method) + if !ok { + return nil + } + return di.(Decompressor) +} diff --git a/vendor/github.com/STARRY-S/zip/struct.go b/vendor/github.com/STARRY-S/zip/struct.go new file mode 100644 index 00000000..9a8e67cc --- /dev/null +++ b/vendor/github.com/STARRY-S/zip/struct.go @@ -0,0 +1,419 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+/*
+Package zip provides support for reading and writing ZIP archives.
+
+See the [ZIP specification] for details.
+
+This package does not support disk spanning.
+
+A note about ZIP64:
+
+To be backwards compatible the FileHeader has both 32 and 64 bit Size
+fields. The 64 bit fields will always contain the correct value and
+for normal archives both fields will be the same. For files requiring
+the ZIP64 format the 32 bit fields will be 0xffffffff and the 64 bit
+fields must be used instead.
+
+[ZIP specification]: https://www.pkware.com/appnote
+*/
+package zip
+
+import (
+	"io/fs"
+	"path"
+	"time"
+)
+
+// Compression methods.
+const (
+	Store   uint16 = 0 // no compression
+	Deflate uint16 = 8 // DEFLATE compressed
+)
+
+const (
+	fileHeaderSignature      = 0x04034b50
+	directoryHeaderSignature = 0x02014b50
+	directoryEndSignature    = 0x06054b50
+	directory64LocSignature  = 0x07064b50
+	directory64EndSignature  = 0x06064b50
+	dataDescriptorSignature  = 0x08074b50 // de-facto standard; required by OS X Finder
+	fileHeaderLen            = 30         // + filename + extra
+	directoryHeaderLen       = 46         // + filename + extra + comment
+	directoryEndLen          = 22         // + comment
+	dataDescriptorLen        = 16         // four uint32: descriptor signature, crc32, compressed size, size
+	dataDescriptor64Len      = 24         // two uint32: signature, crc32 | two uint64: compressed size, size
+	directory64LocLen        = 20         //
+	directory64EndLen        = 56         // + extra
+
+	// Constants for the first byte in CreatorVersion.
+	creatorFAT    = 0
+	creatorUnix   = 3
+	creatorNTFS   = 11
+	creatorVFAT   = 14
+	creatorMacOSX = 19
+
+	// Version numbers.
+	zipVersion20 = 20 // 2.0
+	zipVersion45 = 45 // 4.5 (reads and writes zip64 archives)
+
+	// Limits for non zip64 files.
+	uint16max = (1 << 16) - 1
+	uint32max = (1 << 32) - 1
+
+	// Extra header IDs.
+	//
+	// IDs 0..31 are reserved for official use by PKWARE.
+	// IDs above that range are defined by third-party vendors.
+	// Since ZIP lacks both high-precision timestamps and an official
+	// specification of the timezone used for the date fields, many
+	// competing extra fields have been invented. Pervasive use
+	// effectively makes them "official".
+	//
+	// See http://mdfs.net/Docs/Comp/Archiving/Zip/ExtraField
+	zip64ExtraID       = 0x0001 // Zip64 extended information
+	ntfsExtraID        = 0x000a // NTFS
+	unixExtraID        = 0x000d // UNIX
+	extTimeExtraID     = 0x5455 // Extended timestamp
+	infoZipUnixExtraID = 0x5855 // Info-ZIP Unix extension
+)
+
+// FileHeader describes a file within a ZIP file.
+// See the [ZIP specification] for details.
+//
+// [ZIP specification]: https://www.pkware.com/appnote
+type FileHeader struct {
+	// Name is the name of the file.
+	//
+	// It must be a relative path, not start with a drive letter (such as "C:"),
+	// and must use forward slashes instead of back slashes. A trailing slash
+	// indicates that this file is a directory and should have no data.
+	Name string
+
+	// Comment is any arbitrary user-defined string shorter than 64KiB.
+	Comment string
+
+	// NonUTF8 indicates that Name and Comment are not encoded in UTF-8.
+	//
+	// By specification, the only other encoding permitted should be CP-437,
+	// but historically many ZIP readers interpret Name and Comment as whatever
+	// the system's local character encoding happens to be.
+	//
+	// This flag should only be set if the user intends to encode a non-portable
+	// ZIP file for a specific localized region. Otherwise, the Writer
+	// automatically sets the ZIP format's UTF-8 flag for valid UTF-8 strings.
+	NonUTF8 bool
+
+	CreatorVersion uint16
+	ReaderVersion  uint16
+	Flags          uint16
+
+	// Method is the compression method. If zero, Store is used.
+	Method uint16
+
+	// Modified is the modified time of the file.
+	//
+	// When reading, an extended timestamp is preferred over the legacy MS-DOS
+	// date field, and the offset between the times is used as the timezone.
+	// If only the MS-DOS date is present, the timezone is assumed to be UTC.
+	//
+	// When writing, an extended timestamp (which is timezone-agnostic) is
+	// always emitted. The legacy MS-DOS date field is encoded according to the
+	// location of the Modified time.
+	Modified time.Time
+
+	// ModifiedTime is an MS-DOS-encoded time.
+	//
+	// Deprecated: Use Modified instead.
+	ModifiedTime uint16
+
+	// ModifiedDate is an MS-DOS-encoded date.
+	//
+	// Deprecated: Use Modified instead.
+	ModifiedDate uint16
+
+	// CRC32 is the CRC32 checksum of the file content.
+	CRC32 uint32
+
+	// CompressedSize is the compressed size of the file in bytes.
+	// If either the uncompressed or compressed size of the file
+	// does not fit in 32 bits, CompressedSize is set to ^uint32(0).
+	//
+	// Deprecated: Use CompressedSize64 instead.
+	CompressedSize uint32
+
+	// UncompressedSize is the uncompressed size of the file in bytes.
+	// If either the uncompressed or compressed size of the file
+	// does not fit in 32 bits, UncompressedSize is set to ^uint32(0).
+	//
+	// Deprecated: Use UncompressedSize64 instead.
+	UncompressedSize uint32
+
+	// CompressedSize64 is the compressed size of the file in bytes.
+	CompressedSize64 uint64
+
+	// UncompressedSize64 is the uncompressed size of the file in bytes.
+	UncompressedSize64 uint64
+
+	Extra         []byte
+	ExternalAttrs uint32 // Meaning depends on CreatorVersion
+}
+
+// FileInfo returns an fs.FileInfo for the FileHeader.
+func (h *FileHeader) FileInfo() fs.FileInfo {
+	return headerFileInfo{h}
+}
+
+// headerFileInfo implements fs.FileInfo.
+type headerFileInfo struct {
+	fh *FileHeader
+}
+
+func (fi headerFileInfo) Name() string { return path.Base(fi.fh.Name) }
+func (fi headerFileInfo) Size() int64 {
+	if fi.fh.UncompressedSize64 > 0 {
+		return int64(fi.fh.UncompressedSize64)
+	}
+	return int64(fi.fh.UncompressedSize)
+}
+func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
+func (fi headerFileInfo) ModTime() time.Time {
+	if fi.fh.Modified.IsZero() {
+		return fi.fh.ModTime()
+	}
+	return fi.fh.Modified.UTC()
+}
+func (fi headerFileInfo) Mode() fs.FileMode { return fi.fh.Mode() }
+func (fi headerFileInfo) Type() fs.FileMode { return fi.fh.Mode().Type() }
+func (fi headerFileInfo) Sys() any          { return fi.fh }
+
+func (fi headerFileInfo) Info() (fs.FileInfo, error) { return fi, nil }
+
+func (fi headerFileInfo) String() string {
+	return fs.FormatFileInfo(fi)
+}
+
+// FileInfoHeader creates a partially-populated FileHeader from an
+// fs.FileInfo.
+// Because fs.FileInfo's Name method returns only the base name of
+// the file it describes, it may be necessary to modify the Name field
+// of the returned header to provide the full path name of the file.
+// If compression is desired, callers should set the FileHeader.Method
+// field; it is unset by default.
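+//
+// A short sketch (fi is any fs.FileInfo; the "backup/" prefix and the
+// *Writer named zw are assumptions for illustration):
+//
+//	fh, err := zip.FileInfoHeader(fi)
+//	if err != nil {
+//		return err
+//	}
+//	fh.Name = "backup/" + fi.Name()
+//	fh.Method = zip.Deflate
+//	w, err := zw.CreateHeader(fh)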
+func FileInfoHeader(fi fs.FileInfo) (*FileHeader, error) { + size := fi.Size() + fh := &FileHeader{ + Name: fi.Name(), + UncompressedSize64: uint64(size), + } + fh.SetModTime(fi.ModTime()) + fh.SetMode(fi.Mode()) + if fh.UncompressedSize64 > uint32max { + fh.UncompressedSize = uint32max + } else { + fh.UncompressedSize = uint32(fh.UncompressedSize64) + } + return fh, nil +} + +type directoryEnd struct { + diskNbr uint32 // unused + dirDiskNbr uint32 // unused + dirRecordsThisDisk uint64 // unused + directoryRecords uint64 + directorySize uint64 + directoryOffset uint64 // relative to file + commentLen uint16 + comment string +} + +// timeZone returns a *time.Location based on the provided offset. +// If the offset is non-sensible, then this uses an offset of zero. +func timeZone(offset time.Duration) *time.Location { + const ( + minOffset = -12 * time.Hour // E.g., Baker island at -12:00 + maxOffset = +14 * time.Hour // E.g., Line island at +14:00 + offsetAlias = 15 * time.Minute // E.g., Nepal at +5:45 + ) + offset = offset.Round(offsetAlias) + if offset < minOffset || maxOffset < offset { + offset = 0 + } + return time.FixedZone("", int(offset/time.Second)) +} + +// msDosTimeToTime converts an MS-DOS date and time into a time.Time. +// The resolution is 2s. +// See: https://msdn.microsoft.com/en-us/library/ms724247(v=VS.85).aspx +func msDosTimeToTime(dosDate, dosTime uint16) time.Time { + return time.Date( + // date bits 0-4: day of month; 5-8: month; 9-15: years since 1980 + int(dosDate>>9+1980), + time.Month(dosDate>>5&0xf), + int(dosDate&0x1f), + + // time bits 0-4: second/2; 5-10: minute; 11-15: hour + int(dosTime>>11), + int(dosTime>>5&0x3f), + int(dosTime&0x1f*2), + 0, // nanoseconds + + time.UTC, + ) +} + +// timeToMsDosTime converts a time.Time to an MS-DOS date and time. +// The resolution is 2s. +// See: https://msdn.microsoft.com/en-us/library/ms724274(v=VS.85).aspx +func timeToMsDosTime(t time.Time) (fDate uint16, fTime uint16) { + fDate = uint16(t.Day() + int(t.Month())<<5 + (t.Year()-1980)<<9) + fTime = uint16(t.Second()/2 + t.Minute()<<5 + t.Hour()<<11) + return +} + +// ModTime returns the modification time in UTC using the legacy +// ModifiedDate and ModifiedTime fields. +// +// Deprecated: Use Modified instead. +func (h *FileHeader) ModTime() time.Time { + return msDosTimeToTime(h.ModifiedDate, h.ModifiedTime) +} + +// SetModTime sets the Modified, ModifiedTime, and ModifiedDate fields +// to the given time in UTC. +// +// Deprecated: Use Modified instead. +func (h *FileHeader) SetModTime(t time.Time) { + t = t.UTC() // Convert to UTC for compatibility + h.Modified = t + h.ModifiedDate, h.ModifiedTime = timeToMsDosTime(t) +} + +const ( + // Unix constants. The specification doesn't mention them, + // but these seem to be the values agreed on by tools. + s_IFMT = 0xf000 + s_IFSOCK = 0xc000 + s_IFLNK = 0xa000 + s_IFREG = 0x8000 + s_IFBLK = 0x6000 + s_IFDIR = 0x4000 + s_IFCHR = 0x2000 + s_IFIFO = 0x1000 + s_ISUID = 0x800 + s_ISGID = 0x400 + s_ISVTX = 0x200 + + msdosDir = 0x10 + msdosReadOnly = 0x01 +) + +// Mode returns the permission and mode bits for the FileHeader. 
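+// For Unix and macOS creators, the bits come from the high 16 bits of
+// ExternalAttrs; for FAT, VFAT, and NTFS creators they are mapped from the
+// MS-DOS attribute bits; and a trailing "/" in Name always adds ModeDir.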
+func (h *FileHeader) Mode() (mode fs.FileMode) { + switch h.CreatorVersion >> 8 { + case creatorUnix, creatorMacOSX: + mode = unixModeToFileMode(h.ExternalAttrs >> 16) + case creatorNTFS, creatorVFAT, creatorFAT: + mode = msdosModeToFileMode(h.ExternalAttrs) + } + if len(h.Name) > 0 && h.Name[len(h.Name)-1] == '/' { + mode |= fs.ModeDir + } + return mode +} + +// SetMode changes the permission and mode bits for the FileHeader. +func (h *FileHeader) SetMode(mode fs.FileMode) { + h.CreatorVersion = h.CreatorVersion&0xff | creatorUnix<<8 + h.ExternalAttrs = fileModeToUnixMode(mode) << 16 + + // set MSDOS attributes too, as the original zip does. + if mode&fs.ModeDir != 0 { + h.ExternalAttrs |= msdosDir + } + if mode&0200 == 0 { + h.ExternalAttrs |= msdosReadOnly + } +} + +// isZip64 reports whether the file size exceeds the 32 bit limit +func (h *FileHeader) isZip64() bool { + return h.CompressedSize64 >= uint32max || h.UncompressedSize64 >= uint32max +} + +func (h *FileHeader) hasDataDescriptor() bool { + return h.Flags&0x8 != 0 +} + +func msdosModeToFileMode(m uint32) (mode fs.FileMode) { + if m&msdosDir != 0 { + mode = fs.ModeDir | 0777 + } else { + mode = 0666 + } + if m&msdosReadOnly != 0 { + mode &^= 0222 + } + return mode +} + +func fileModeToUnixMode(mode fs.FileMode) uint32 { + var m uint32 + switch mode & fs.ModeType { + default: + m = s_IFREG + case fs.ModeDir: + m = s_IFDIR + case fs.ModeSymlink: + m = s_IFLNK + case fs.ModeNamedPipe: + m = s_IFIFO + case fs.ModeSocket: + m = s_IFSOCK + case fs.ModeDevice: + m = s_IFBLK + case fs.ModeDevice | fs.ModeCharDevice: + m = s_IFCHR + } + if mode&fs.ModeSetuid != 0 { + m |= s_ISUID + } + if mode&fs.ModeSetgid != 0 { + m |= s_ISGID + } + if mode&fs.ModeSticky != 0 { + m |= s_ISVTX + } + return m | uint32(mode&0777) +} + +func unixModeToFileMode(m uint32) fs.FileMode { + mode := fs.FileMode(m & 0777) + switch m & s_IFMT { + case s_IFBLK: + mode |= fs.ModeDevice + case s_IFCHR: + mode |= fs.ModeDevice | fs.ModeCharDevice + case s_IFDIR: + mode |= fs.ModeDir + case s_IFIFO: + mode |= fs.ModeNamedPipe + case s_IFLNK: + mode |= fs.ModeSymlink + case s_IFREG: + // nothing to do + case s_IFSOCK: + mode |= fs.ModeSocket + } + if m&s_ISGID != 0 { + mode |= fs.ModeSetgid + } + if m&s_ISUID != 0 { + mode |= fs.ModeSetuid + } + if m&s_ISVTX != 0 { + mode |= fs.ModeSticky + } + return mode +} diff --git a/vendor/github.com/STARRY-S/zip/updater.go b/vendor/github.com/STARRY-S/zip/updater.go new file mode 100644 index 00000000..f2b9169a --- /dev/null +++ b/vendor/github.com/STARRY-S/zip/updater.go @@ -0,0 +1,537 @@ +package zip + +import ( + "errors" + "hash/crc32" + "io" + "path/filepath" + "strings" +) + +// sectionReaderWriter implements io.Reader, io.Writer, io.Seeker, io.ReaderAt, +// io.WriterAt interfaces based on io.ReadWriteSeeker. 
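+// ReadAt and WriteAt emulate positioned I/O by seeking to the requested
+// offset and then restoring the previous position, so unlike the usual
+// io.ReaderAt contract they are not safe for concurrent use.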
+type sectionReaderWriter struct {
+	rws io.ReadWriteSeeker
+}
+
+func newSectionReaderWriter(rws io.ReadWriteSeeker) *sectionReaderWriter {
+	return &sectionReaderWriter{
+		rws: rws,
+	}
+}
+
+func (s *sectionReaderWriter) ReadAt(p []byte, offset int64) (int, error) {
+	currOffset, err := s.rws.Seek(0, io.SeekCurrent)
+	if err != nil {
+		return 0, err
+	}
+	defer s.rws.Seek(currOffset, io.SeekStart)
+	_, err = s.rws.Seek(offset, io.SeekStart)
+	if err != nil {
+		return 0, err
+	}
+	return s.rws.Read(p)
+}
+
+func (s *sectionReaderWriter) WriteAt(p []byte, offset int64) (n int, err error) {
+	currOffset, err := s.rws.Seek(0, io.SeekCurrent)
+	if err != nil {
+		return 0, err
+	}
+	defer s.rws.Seek(currOffset, io.SeekStart)
+	_, err = s.rws.Seek(offset, io.SeekStart)
+	if err != nil {
+		return 0, err
+	}
+	return s.rws.Write(p)
+}
+
+func (s *sectionReaderWriter) Seek(offset int64, whence int) (int64, error) {
+	return s.rws.Seek(offset, whence)
+}
+
+func (s *sectionReaderWriter) Read(p []byte) (n int, err error) {
+	return s.rws.Read(p)
+}
+
+func (s *sectionReaderWriter) Write(p []byte) (n int, err error) {
+	return s.rws.Write(p)
+}
+
+func (s *sectionReaderWriter) offset() (int64, error) {
+	return s.rws.Seek(0, io.SeekCurrent)
+}
+
+type Directory struct {
+	FileHeader
+	offset int64 // header offset
+}
+
+func (d *Directory) HeaderOffset() int64 {
+	return d.offset
+}
+
+// Updater modifies and appends files in an existing zip archive without
+// decompressing the whole file.
+type Updater struct {
+	rw          *sectionReaderWriter
+	offset      int64
+	dir         []*header
+	last        *fileWriter
+	closed      bool
+	compressors map[uint16]Compressor
+	comment     string
+
+	// Some JAR files are zip files with a prefix that is a bash script.
+	// The baseOffset field is the start of the zip file proper.
+	baseOffset int64
+
+	dirOffset int64
+}
+
+// NewUpdater returns a new Updater reading from and writing to rws.
+// The archive size is determined by seeking to the end of rws.
+func NewUpdater(rws io.ReadWriteSeeker) (*Updater, error) {
+	size, err := rws.Seek(0, io.SeekEnd)
+	if err != nil {
+		return nil, err
+	}
+	zu := &Updater{
+		rw: newSectionReaderWriter(rws),
+	}
+	if err = zu.init(size); err != nil && err != ErrInsecurePath {
+		return nil, err
+	}
+	return zu, nil
+}
+
+func (u *Updater) init(size int64) error {
+	end, baseOffset, err := readDirectoryEnd(u.rw, size)
+	if err != nil {
+		return err
+	}
+	u.baseOffset = baseOffset
+	u.dirOffset = int64(end.directoryOffset)
+	// Since the number of directory records is not validated, it is not
+	// safe to preallocate u.dir without first checking that the specified
+	// number of files is reasonable, since a malformed archive may
+	// indicate it contains up to 1 << 128 - 1 files. Since each file has a
+	// header which will be _at least_ 30 bytes we can safely preallocate
+	// if (data size / 30) >= end.directoryRecords.
+	if end.directorySize < uint64(size) && (uint64(size)-end.directorySize)/30 >= end.directoryRecords {
+		u.dir = make([]*header, 0, end.directoryRecords)
+	}
+	u.comment = end.comment
+	if _, err = u.rw.Seek(u.baseOffset+int64(end.directoryOffset), io.SeekStart); err != nil {
+		return err
+	}
+
+	// The count of files inside a zip is truncated to fit in a uint16.
+	// Gloss over this by reading headers until we encounter
+	// a bad one, and then only report an ErrFormat or UnexpectedEOF if
+	// the file count modulo 65536 is incorrect.
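+	// (When a zip64 end record is present, end.directoryRecords has already
+	// been widened past 16 bits, which is why only the low 16 bits are
+	// compared after the loop below.)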
+	for {
+		f := &File{zip: nil, zipr: u.rw}
+		err = readDirectoryHeader(f, u.rw)
+		if err == ErrFormat || err == io.ErrUnexpectedEOF {
+			break
+		}
+		if err != nil {
+			return err
+		}
+		f.headerOffset += u.baseOffset
+		h := &header{
+			FileHeader: &f.FileHeader,
+			offset:     uint64(f.headerOffset),
+		}
+		u.dir = append(u.dir, h)
+	}
+	if uint16(len(u.dir)) != uint16(end.directoryRecords) { // only compare 16 bits here
+		// Return the readDirectoryHeader error if we read
+		// the wrong number of directory entries.
+		return err
+	}
+	for _, d := range u.dir {
+		if d.Name == "" {
+			// Zip permits an empty file name field.
+			continue
+		}
+		// The zip specification states that names must use forward slashes,
+		// so consider any backslashes in the name insecure.
+		if !filepath.IsLocal(d.Name) || strings.Contains(d.Name, "\\") {
+			return ErrInsecurePath
+		}
+	}
+	return nil
+}
+
+func (u *Updater) Directory() []Directory {
+	dir := make([]Directory, 0, len(u.dir))
+	for _, d := range u.dir {
+		dir = append(dir, Directory{
+			FileHeader: *d.FileHeader,
+			offset:     int64(d.offset),
+		})
+	}
+	return dir
+}
+
+// Append adds a file to the zip file using the provided name.
+// It returns a Writer to which the file contents should be written.
+// The file contents will be compressed using the Deflate method.
+// The name must be a relative path: it must not start with a drive
+// letter (e.g. C:) or leading slash, and only forward slashes are
+// allowed. To create a directory instead of a file, add a trailing
+// slash to the name.
+// The file's contents must be written to the io.Writer before the next
+// call to Append, AppendAt, AppendHeaderAt, or Close.
+func (u *Updater) Append(name string) (io.Writer, error) {
+	h := &FileHeader{
+		Name:   name,
+		Method: Deflate,
+	}
+	return u.AppendHeaderAt(h, -1)
+}
+
+// AppendAt adds a file to the zip file at the specified offset.
+// If the offset is less than 0, data will be appended after the last file
+// in the zip archive.
+// Note that the newly appended file should be larger than the file it
+// replaces; in particular, when using the Deflate compression method, the
+// compressed data size should be larger than the original file data size.
+func (u *Updater) AppendAt(name string, offset int64) (io.Writer, error) {
+	h := &FileHeader{
+		Name:   name,
+		Method: Deflate,
+	}
+	return u.AppendHeaderAt(h, offset)
+}
+
+func (u *Updater) prepare(fh *FileHeader, offset int64) error {
+	if u.last != nil && !u.last.closed {
+		if err := u.last.close(); err != nil {
+			return err
+		}
+		// update the dirOffset
+		dirOffset, err := u.rw.offset()
+		if err != nil {
+			return err
+		}
+		u.dirOffset = dirOffset
+	}
+	if len(u.dir) > 0 && u.dir[len(u.dir)-1].FileHeader == fh {
+		// See https://golang.org/issue/11144 confusion.
+		return errors.New("archive/zip: invalid duplicate FileHeader")
+	}
+	return nil
+}
+
+// AppendHeaderAt adds a file to the zip archive at the specified offset,
+// using the provided FileHeader for the file metadata.
+// The Updater takes ownership of fh and may mutate its fields; the caller
+// must not modify fh after calling AppendHeaderAt.
+//
+// If the offset is less than 0, data will be appended after the last file
+// in the zip archive.
+//
+// Note that the newly appended file should be larger than the file it
+// replaces; in particular, when using the Deflate compression method, the
+// compressed data size should be larger than the original file data size.
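+//
+// A hedged usage sketch (the archive path and entry name are assumptions
+// for illustration):
+//
+//	f, err := os.OpenFile("archive.zip", os.O_RDWR, 0o644)
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()
+//	u, err := zip.NewUpdater(f)
+//	if err != nil {
+//		return err
+//	}
+//	w, err := u.AppendHeaderAt(&zip.FileHeader{Name: "added.txt", Method: zip.Deflate}, -1)
+//	if err != nil {
+//		return err
+//	}
+//	if _, err := io.WriteString(w, "hello"); err != nil {
+//		return err
+//	}
+//	return u.Close()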
+func (u *Updater) AppendHeaderAt(fh *FileHeader, offset int64) (io.Writer, error) {
+	if err := u.prepare(fh, offset); err != nil {
+		return nil, err
+	}
+
+	if offset < 0 {
+		offset = u.dirOffset
+	}
+	// Offset should match existing header offsets or equal to directory offset.
+	var offsetExists bool
+	for i, h := range u.dir {
+		if h.offset == uint64(offset) {
+			offsetExists = true
+			// Delete the corresponding header.
+			u.dir = append(u.dir[:i], u.dir[i+1:]...)
+			break
+		}
+	}
+	if !offsetExists && offset != u.dirOffset {
+		return nil, errors.New("archive/zip: invalid header offset provided")
+	}
+
+	// Seek the file offset.
+	_, err := u.rw.Seek(offset, io.SeekStart)
+	if err != nil {
+		return nil, err
+	}
+	u.offset = offset
+
+	// The ZIP format has a sad state of affairs regarding character encoding.
+	// Officially, the name and comment fields are supposed to be encoded
+	// in CP-437 (which is mostly compatible with ASCII), unless the UTF-8
+	// flag bit is set. However, there are several problems:
+	//
+	//	* Many ZIP readers still do not support UTF-8.
+	//	* If the UTF-8 flag is cleared, several readers simply interpret the
+	//	name and comment fields as whatever the local system encoding is.
+	//
+	// In order to avoid breaking readers without UTF-8 support,
+	// we avoid setting the UTF-8 flag if the strings are CP-437 compatible.
+	// However, if the strings require multibyte UTF-8 encoding and are
+	// valid UTF-8 strings, then we set the UTF-8 bit.
+	//
+	// For the case where the user explicitly wants to specify the encoding
+	// as UTF-8, they will need to set the flag bit themselves.
+	utf8Valid1, utf8Require1 := detectUTF8(fh.Name)
+	utf8Valid2, utf8Require2 := detectUTF8(fh.Comment)
+	switch {
+	case fh.NonUTF8:
+		fh.Flags &^= 0x800
+	case (utf8Require1 || utf8Require2) && (utf8Valid1 && utf8Valid2):
+		fh.Flags |= 0x800
+	}
+
+	fh.CreatorVersion = fh.CreatorVersion&0xff00 | zipVersion20 // preserve compatibility byte
+	fh.ReaderVersion = zipVersion20
+
+	// If Modified is set, this takes precedence over MS-DOS timestamp fields.
+	if !fh.Modified.IsZero() {
+		// Contrary to the FileHeader.SetModTime method, we intentionally
+		// do not convert to UTC, because we assume the user intends to encode
+		// the date using the specified timezone. A user may want this control
+		// because many legacy ZIP readers interpret the timestamp according
+		// to the local timezone.
+		//
+		// The timezone is only non-UTC if the user sets the Modified field
+		// directly. All other approaches set UTC.
+		fh.ModifiedDate, fh.ModifiedTime = timeToMsDosTime(fh.Modified)
+
+		// Use "extended timestamp" format since this is what Info-ZIP uses.
+		// Nearly every major ZIP implementation uses a different format,
+		// but at least most seem to be able to understand the other formats.
+		//
+		// This format happens to be identical for both local and central header
+		// if modification time is the only timestamp being encoded.
+		var mbuf [9]byte // 2*SizeOf(uint16) + SizeOf(uint8) + SizeOf(uint32)
+		mt := uint32(fh.Modified.Unix())
+		eb := writeBuf(mbuf[:])
+		eb.uint16(extTimeExtraID)
+		eb.uint16(5)  // Size: SizeOf(uint8) + SizeOf(uint32)
+		eb.uint8(1)   // Flags: ModTime
+		eb.uint32(mt) // ModTime
+		fh.Extra = append(fh.Extra, mbuf[:]...)
+	}
+
+	var (
+		ow io.Writer
+		fw *fileWriter
+	)
+	h := &header{
+		FileHeader: fh,
+		offset:     uint64(u.offset),
+	}
+	if strings.HasSuffix(fh.Name, "/") {
+		// Set the compression method to Store to ensure data length is truly zero,
+		// which the writeHeader method always encodes for the size fields.
+		// This is necessary as most compression formats have non-zero lengths
+		// even when compressing an empty string.
+		fh.Method = Store
+		fh.Flags &^= 0x8 // we will not write a data descriptor
+
+		// Explicitly clear sizes as they have no meaning for directories.
+		fh.CompressedSize = 0
+		fh.CompressedSize64 = 0
+		fh.UncompressedSize = 0
+		fh.UncompressedSize64 = 0
+
+		ow = dirWriter{}
+	} else {
+		fh.Flags |= 0x8 // we will write a data descriptor
+
+		fw = &fileWriter{
+			zipw:      u.rw,
+			compCount: &countWriter{w: u.rw},
+			crc32:     crc32.NewIEEE(),
+		}
+		comp := u.compressor(fh.Method)
+		if comp == nil {
+			return nil, ErrAlgorithm
+		}
+		var err error
+		fw.comp, err = comp(fw.compCount)
+		if err != nil {
+			return nil, err
+		}
+		fw.rawCount = &countWriter{w: fw.comp}
+		fw.header = h
+		ow = fw
+	}
+	u.dir = append(u.dir, h)
+	if err := writeHeader(u.rw, h); err != nil {
+		return nil, err
+	}
+	// If we're creating a directory, fw is nil.
+	u.last = fw
+	u.dirOffset, err = u.rw.offset()
+	if err != nil {
+		return nil, err
+	}
+
+	return ow, nil
+}
+
+func (u *Updater) compressor(method uint16) Compressor {
+	comp := u.compressors[method]
+	if comp == nil {
+		comp = compressor(method)
+	}
+	return comp
+}
+
+func (u *Updater) SetComment(comment string) error {
+	if len(comment) > uint16max {
+		return errors.New("zip: Updater.Comment too long")
+	}
+	u.comment = comment
+	return nil
+}
+
+func (u *Updater) GetComment() string {
+	return u.comment
+}
+
+func (u *Updater) Close() error {
+	if u.last != nil && !u.last.closed {
+		if err := u.last.close(); err != nil {
+			return err
+		}
+		u.last = nil
+	}
+	if u.closed {
+		return errors.New("zip: updater closed twice")
+	}
+	u.closed = true
+
+	// write central directory
+	start, err := u.rw.offset()
+	if err != nil {
+		return err
+	}
+	for _, h := range u.dir {
+		buf := make([]byte, directoryHeaderLen)
+		b := writeBuf(buf)
+		b.uint32(uint32(directoryHeaderSignature))
+		b.uint16(h.CreatorVersion)
+		b.uint16(h.ReaderVersion)
+		b.uint16(h.Flags)
+		b.uint16(h.Method)
+		b.uint16(h.ModifiedTime)
+		b.uint16(h.ModifiedDate)
+		b.uint32(h.CRC32)
+		if h.isZip64() || h.offset >= uint32max {
+			// the file needs a zip64 header. store maxint in both
+			// 32 bit size fields (and offset later) to signal that the
+			// zip64 extra header should be used.
+			b.uint32(uint32max) // compressed size
+			b.uint32(uint32max) // uncompressed size
+
+			// append a zip64 extra block to Extra
+			var buf [28]byte // 2x uint16 + 3x uint64
+			eb := writeBuf(buf[:])
+			eb.uint16(zip64ExtraID)
+			eb.uint16(24) // size = 3x uint64
+			eb.uint64(h.UncompressedSize64)
+			eb.uint64(h.CompressedSize64)
+			eb.uint64(uint64(h.offset))
+			h.Extra = append(h.Extra, buf[:]...)
+ } else { + b.uint32(h.CompressedSize) + b.uint32(h.UncompressedSize) + } + + b.uint16(uint16(len(h.Name))) + b.uint16(uint16(len(h.Extra))) + b.uint16(uint16(len(h.Comment))) + b = b[4:] // skip disk number start and internal file attr (2x uint16) + b.uint32(h.ExternalAttrs) + if h.offset > uint32max { + b.uint32(uint32max) + } else { + b.uint32(uint32(h.offset)) + } + if _, err := u.rw.Write(buf); err != nil { + return err + } + if _, err := io.WriteString(u.rw, h.Name); err != nil { + return err + } + if _, err := u.rw.Write(h.Extra); err != nil { + return err + } + if _, err := io.WriteString(u.rw, h.Comment); err != nil { + return err + } + } + end, err := u.rw.offset() + if err != nil { + return err + } + + records := uint64(len(u.dir)) + size := uint64(end - start) + offset := uint64(start) + + if records >= uint16max || size >= uint32max || offset >= uint32max { + var buf [directory64EndLen + directory64LocLen]byte + b := writeBuf(buf[:]) + + // zip64 end of central directory record + b.uint32(directory64EndSignature) + b.uint64(directory64EndLen - 12) // length minus signature (uint32) and length fields (uint64) + b.uint16(zipVersion45) // version made by + b.uint16(zipVersion45) // version needed to extract + b.uint32(0) // number of this disk + b.uint32(0) // number of the disk with the start of the central directory + b.uint64(records) // total number of entries in the central directory on this disk + b.uint64(records) // total number of entries in the central directory + b.uint64(size) // size of the central directory + b.uint64(offset) // offset of start of central directory with respect to the starting disk number + + // zip64 end of central directory locator + b.uint32(directory64LocSignature) + b.uint32(0) // number of the disk with the start of the zip64 end of central directory + b.uint64(uint64(end)) // relative offset of the zip64 end of central directory record + b.uint32(1) // total number of disks + + if _, err := u.rw.Write(buf[:]); err != nil { + return err + } + + // store max values in the regular end record to signal + // that the zip64 values should be used instead + records = uint16max + size = uint32max + offset = uint32max + } + + // write end record + var buf [directoryEndLen]byte + b := writeBuf(buf[:]) + b.uint32(uint32(directoryEndSignature)) + b = b[4:] // skip over disk number and first disk number (2x uint16) + b.uint16(uint16(records)) // number of entries this disk + b.uint16(uint16(records)) // number of entries total + b.uint32(uint32(size)) // size of directory + b.uint32(uint32(offset)) // start of directory + b.uint16(uint16(len(u.comment))) // byte size of EOCD comment + if _, err := u.rw.Write(buf[:]); err != nil { + return err + } + if _, err := io.WriteString(u.rw, u.comment); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/STARRY-S/zip/writer.go b/vendor/github.com/STARRY-S/zip/writer.go new file mode 100644 index 00000000..3b23cc33 --- /dev/null +++ b/vendor/github.com/STARRY-S/zip/writer.go @@ -0,0 +1,634 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package zip + +import ( + "bufio" + "encoding/binary" + "errors" + "hash" + "hash/crc32" + "io" + "strings" + "unicode/utf8" +) + +var ( + errLongName = errors.New("zip: FileHeader.Name too long") + errLongExtra = errors.New("zip: FileHeader.Extra too long") +) + +// Writer implements a zip file writer. 
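+//
+// A minimal usage sketch (the buffer and the entry name are assumptions):
+//
+//	var buf bytes.Buffer
+//	zw := zip.NewWriter(&buf)
+//	w, err := zw.Create("hello.txt")
+//	if err != nil {
+//		return err
+//	}
+//	if _, err := w.Write([]byte("hello, world\n")); err != nil {
+//		return err
+//	}
+//	return zw.Close()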
+type Writer struct {
+	cw          *countWriter
+	dir         []*header
+	last        *fileWriter
+	closed      bool
+	compressors map[uint16]Compressor
+	comment     string
+
+	// testHookCloseSizeOffset if non-nil is called with the size
+	// and offset of the central directory at Close.
+	testHookCloseSizeOffset func(size, offset uint64)
+}
+
+type header struct {
+	*FileHeader
+	offset uint64
+	raw    bool
+}
+
+// NewWriter returns a new Writer writing a zip file to w.
+func NewWriter(w io.Writer) *Writer {
+	return &Writer{cw: &countWriter{w: bufio.NewWriter(w)}}
+}
+
+// SetOffset sets the offset of the beginning of the zip data within the
+// underlying writer. It should be used when the zip data is appended to an
+// existing file, such as a binary executable.
+// It must be called before any data is written.
+func (w *Writer) SetOffset(n int64) {
+	if w.cw.count != 0 {
+		panic("zip: SetOffset called after data was written")
+	}
+	w.cw.count = n
+}
+
+// Flush flushes any buffered data to the underlying writer.
+// Calling Flush is not normally necessary; calling Close is sufficient.
+func (w *Writer) Flush() error {
+	return w.cw.w.(*bufio.Writer).Flush()
+}
+
+// SetComment sets the end-of-central-directory comment field.
+// It can only be called before Close.
+func (w *Writer) SetComment(comment string) error {
+	if len(comment) > uint16max {
+		return errors.New("zip: Writer.Comment too long")
+	}
+	w.comment = comment
+	return nil
+}
+
+// Close finishes writing the zip file by writing the central directory.
+// It does not close the underlying writer.
+func (w *Writer) Close() error {
+	if w.last != nil && !w.last.closed {
+		if err := w.last.close(); err != nil {
+			return err
+		}
+		w.last = nil
+	}
+	if w.closed {
+		return errors.New("zip: writer closed twice")
+	}
+	w.closed = true
+
+	// write central directory
+	start := w.cw.count
+	for _, h := range w.dir {
+		var buf [directoryHeaderLen]byte
+		b := writeBuf(buf[:])
+		b.uint32(uint32(directoryHeaderSignature))
+		b.uint16(h.CreatorVersion)
+		b.uint16(h.ReaderVersion)
+		b.uint16(h.Flags)
+		b.uint16(h.Method)
+		b.uint16(h.ModifiedTime)
+		b.uint16(h.ModifiedDate)
+		b.uint32(h.CRC32)
+		if h.isZip64() || h.offset >= uint32max {
+			// the file needs a zip64 header. store maxint in both
+			// 32 bit size fields (and offset later) to signal that the
+			// zip64 extra header should be used.
+			b.uint32(uint32max) // compressed size
+			b.uint32(uint32max) // uncompressed size
+
+			// append a zip64 extra block to Extra
+			var buf [28]byte // 2x uint16 + 3x uint64
+			eb := writeBuf(buf[:])
+			eb.uint16(zip64ExtraID)
+			eb.uint16(24) // size = 3x uint64
+			eb.uint64(h.UncompressedSize64)
+			eb.uint64(h.CompressedSize64)
+			eb.uint64(h.offset)
+			h.Extra = append(h.Extra, buf[:]...)
+ } else { + b.uint32(h.CompressedSize) + b.uint32(h.UncompressedSize) + } + + b.uint16(uint16(len(h.Name))) + b.uint16(uint16(len(h.Extra))) + b.uint16(uint16(len(h.Comment))) + b = b[4:] // skip disk number start and internal file attr (2x uint16) + b.uint32(h.ExternalAttrs) + if h.offset > uint32max { + b.uint32(uint32max) + } else { + b.uint32(uint32(h.offset)) + } + if _, err := w.cw.Write(buf[:]); err != nil { + return err + } + if _, err := io.WriteString(w.cw, h.Name); err != nil { + return err + } + if _, err := w.cw.Write(h.Extra); err != nil { + return err + } + if _, err := io.WriteString(w.cw, h.Comment); err != nil { + return err + } + } + end := w.cw.count + + records := uint64(len(w.dir)) + size := uint64(end - start) + offset := uint64(start) + + if f := w.testHookCloseSizeOffset; f != nil { + f(size, offset) + } + + if records >= uint16max || size >= uint32max || offset >= uint32max { + var buf [directory64EndLen + directory64LocLen]byte + b := writeBuf(buf[:]) + + // zip64 end of central directory record + b.uint32(directory64EndSignature) + b.uint64(directory64EndLen - 12) // length minus signature (uint32) and length fields (uint64) + b.uint16(zipVersion45) // version made by + b.uint16(zipVersion45) // version needed to extract + b.uint32(0) // number of this disk + b.uint32(0) // number of the disk with the start of the central directory + b.uint64(records) // total number of entries in the central directory on this disk + b.uint64(records) // total number of entries in the central directory + b.uint64(size) // size of the central directory + b.uint64(offset) // offset of start of central directory with respect to the starting disk number + + // zip64 end of central directory locator + b.uint32(directory64LocSignature) + b.uint32(0) // number of the disk with the start of the zip64 end of central directory + b.uint64(uint64(end)) // relative offset of the zip64 end of central directory record + b.uint32(1) // total number of disks + + if _, err := w.cw.Write(buf[:]); err != nil { + return err + } + + // store max values in the regular end record to signal + // that the zip64 values should be used instead + records = uint16max + size = uint32max + offset = uint32max + } + + // write end record + var buf [directoryEndLen]byte + b := writeBuf(buf[:]) + b.uint32(uint32(directoryEndSignature)) + b = b[4:] // skip over disk number and first disk number (2x uint16) + b.uint16(uint16(records)) // number of entries this disk + b.uint16(uint16(records)) // number of entries total + b.uint32(uint32(size)) // size of directory + b.uint32(uint32(offset)) // start of directory + b.uint16(uint16(len(w.comment))) // byte size of EOCD comment + if _, err := w.cw.Write(buf[:]); err != nil { + return err + } + if _, err := io.WriteString(w.cw, w.comment); err != nil { + return err + } + + return w.cw.w.(*bufio.Writer).Flush() +} + +// Create adds a file to the zip file using the provided name. +// It returns a Writer to which the file contents should be written. +// The file contents will be compressed using the Deflate method. +// The name must be a relative path: it must not start with a drive +// letter (e.g. C:) or leading slash, and only forward slashes are +// allowed. To create a directory instead of a file, add a trailing +// slash to the name. +// The file's contents must be written to the io.Writer before the next +// call to Create, CreateHeader, or Close. 
+func (w *Writer) Create(name string) (io.Writer, error) {
+	header := &FileHeader{
+		Name:   name,
+		Method: Deflate,
+	}
+	return w.CreateHeader(header)
+}
+
+// detectUTF8 reports whether s is a valid UTF-8 string, and whether the string
+// must be considered UTF-8 encoding (i.e., not compatible with CP-437, ASCII,
+// or any other common encoding).
+func detectUTF8(s string) (valid, require bool) {
+	for i := 0; i < len(s); {
+		r, size := utf8.DecodeRuneInString(s[i:])
+		i += size
+		// Officially, ZIP uses CP-437, but many readers use the system's
+		// local character encoding. Most encodings are compatible with a large
+		// subset of CP-437, which itself is ASCII-like.
+		//
+		// Forbid 0x7e and 0x5c since EUC-KR and Shift-JIS replace those
+		// characters with localized currency and overline characters.
+		if r < 0x20 || r > 0x7d || r == 0x5c {
+			if !utf8.ValidRune(r) || (r == utf8.RuneError && size == 1) {
+				return false, false
+			}
+			require = true
+		}
+	}
+	return true, require
+}
+
+// prepare performs the bookkeeping operations required at the start of
+// CreateHeader and CreateRaw.
+func (w *Writer) prepare(fh *FileHeader) error {
+	if w.last != nil && !w.last.closed {
+		if err := w.last.close(); err != nil {
+			return err
+		}
+	}
+	if len(w.dir) > 0 && w.dir[len(w.dir)-1].FileHeader == fh {
+		// See https://golang.org/issue/11144 confusion.
+		return errors.New("archive/zip: invalid duplicate FileHeader")
+	}
+	return nil
+}
+
+// CreateHeader adds a file to the zip archive using the provided FileHeader
+// for the file metadata. Writer takes ownership of fh and may mutate
+// its fields. The caller must not modify fh after calling CreateHeader.
+//
+// This returns a Writer to which the file contents should be written.
+// The file's contents must be written to the io.Writer before the next
+// call to Create, CreateHeader, CreateRaw, or Close.
+func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) {
+	if err := w.prepare(fh); err != nil {
+		return nil, err
+	}
+
+	// The ZIP format has a sad state of affairs regarding character encoding.
+	// Officially, the name and comment fields are supposed to be encoded
+	// in CP-437 (which is mostly compatible with ASCII), unless the UTF-8
+	// flag bit is set. However, there are several problems:
+	//
+	//	* Many ZIP readers still do not support UTF-8.
+	//	* If the UTF-8 flag is cleared, several readers simply interpret the
+	//	name and comment fields as whatever the local system encoding is.
+	//
+	// In order to avoid breaking readers without UTF-8 support,
+	// we avoid setting the UTF-8 flag if the strings are CP-437 compatible.
+	// However, if the strings require multibyte UTF-8 encoding and are
+	// valid UTF-8 strings, then we set the UTF-8 bit.
+	//
+	// For the case where the user explicitly wants to specify the encoding
+	// as UTF-8, they will need to set the flag bit themselves.
+	utf8Valid1, utf8Require1 := detectUTF8(fh.Name)
+	utf8Valid2, utf8Require2 := detectUTF8(fh.Comment)
+	switch {
+	case fh.NonUTF8:
+		fh.Flags &^= 0x800
+	case (utf8Require1 || utf8Require2) && (utf8Valid1 && utf8Valid2):
+		fh.Flags |= 0x800
+	}
+
+	fh.CreatorVersion = fh.CreatorVersion&0xff00 | zipVersion20 // preserve compatibility byte
+	fh.ReaderVersion = zipVersion20
+
+	// If Modified is set, this takes precedence over MS-DOS timestamp fields.
+	if !fh.Modified.IsZero() {
+		// Contrary to the FileHeader.SetModTime method, we intentionally
+		// do not convert to UTC, because we assume the user intends to encode
+		// the date using the specified timezone. A user may want this control
+		// because many legacy ZIP readers interpret the timestamp according
+		// to the local timezone.
+		//
+		// The timezone is only non-UTC if the user sets the Modified field
+		// directly. All other approaches set UTC.
+		fh.ModifiedDate, fh.ModifiedTime = timeToMsDosTime(fh.Modified)
+
+		// Use "extended timestamp" format since this is what Info-ZIP uses.
+		// Nearly every major ZIP implementation uses a different format,
+		// but at least most seem to be able to understand the other formats.
+		//
+		// This format happens to be identical for both local and central header
+		// if modification time is the only timestamp being encoded.
+		var mbuf [9]byte // 2*SizeOf(uint16) + SizeOf(uint8) + SizeOf(uint32)
+		mt := uint32(fh.Modified.Unix())
+		eb := writeBuf(mbuf[:])
+		eb.uint16(extTimeExtraID)
+		eb.uint16(5)  // Size: SizeOf(uint8) + SizeOf(uint32)
+		eb.uint8(1)   // Flags: ModTime
+		eb.uint32(mt) // ModTime
+		fh.Extra = append(fh.Extra, mbuf[:]...)
+	}
+
+	var (
+		ow io.Writer
+		fw *fileWriter
+	)
+	h := &header{
+		FileHeader: fh,
+		offset:     uint64(w.cw.count),
+	}
+
+	if strings.HasSuffix(fh.Name, "/") {
+		// Set the compression method to Store to ensure data length is truly zero,
+		// which the writeHeader method always encodes for the size fields.
+		// This is necessary as most compression formats have non-zero lengths
+		// even when compressing an empty string.
+		fh.Method = Store
+		fh.Flags &^= 0x8 // we will not write a data descriptor
+
+		// Explicitly clear sizes as they have no meaning for directories.
+		fh.CompressedSize = 0
+		fh.CompressedSize64 = 0
+		fh.UncompressedSize = 0
+		fh.UncompressedSize64 = 0
+
+		ow = dirWriter{}
+	} else {
+		fh.Flags |= 0x8 // we will write a data descriptor
+
+		fw = &fileWriter{
+			zipw:      w.cw,
+			compCount: &countWriter{w: w.cw},
+			crc32:     crc32.NewIEEE(),
+		}
+		comp := w.compressor(fh.Method)
+		if comp == nil {
+			return nil, ErrAlgorithm
+		}
+		var err error
+		fw.comp, err = comp(fw.compCount)
+		if err != nil {
+			return nil, err
+		}
+		fw.rawCount = &countWriter{w: fw.comp}
+		fw.header = h
+		ow = fw
+	}
+	w.dir = append(w.dir, h)
+	if err := writeHeader(w.cw, h); err != nil {
+		return nil, err
+	}
+	// If we're creating a directory, fw is nil.
+	w.last = fw
+	return ow, nil
+}
+
+func writeHeader(w io.Writer, h *header) error {
+	const maxUint16 = 1<<16 - 1
+	if len(h.Name) > maxUint16 {
+		return errLongName
+	}
+	if len(h.Extra) > maxUint16 {
+		return errLongExtra
+	}
+
+	var buf [fileHeaderLen]byte
+	b := writeBuf(buf[:])
+	b.uint32(uint32(fileHeaderSignature))
+	b.uint16(h.ReaderVersion)
+	b.uint16(h.Flags)
+	b.uint16(h.Method)
+	b.uint16(h.ModifiedTime)
+	b.uint16(h.ModifiedDate)
+	// In raw mode (caller does the compression), the values are either
+	// written here or in the trailing data descriptor based on the header
+	// flags.
+	if h.raw && !h.hasDataDescriptor() {
+		b.uint32(h.CRC32)
+		b.uint32(uint32(min64(h.CompressedSize64, uint32max)))
+		b.uint32(uint32(min64(h.UncompressedSize64, uint32max)))
+	} else {
+		// When this package handles the compression, these values are
+		// always written to the trailing data descriptor.
+        b.uint32(0) // crc32
+        b.uint32(0) // compressed size
+        b.uint32(0) // uncompressed size
+    }
+    b.uint16(uint16(len(h.Name)))
+    b.uint16(uint16(len(h.Extra)))
+    if _, err := w.Write(buf[:]); err != nil {
+        return err
+    }
+    if _, err := io.WriteString(w, h.Name); err != nil {
+        return err
+    }
+    _, err := w.Write(h.Extra)
+    return err
+}
+
+func min64(x, y uint64) uint64 {
+    if x < y {
+        return x
+    }
+    return y
+}
+
+// CreateRaw adds a file to the zip archive using the provided FileHeader and
+// returns a Writer to which the file contents should be written. The file's
+// contents must be written to the io.Writer before the next call to Create,
+// CreateHeader, CreateRaw, or Close.
+//
+// In contrast to CreateHeader, the bytes passed to Writer are not compressed.
+func (w *Writer) CreateRaw(fh *FileHeader) (io.Writer, error) {
+    if err := w.prepare(fh); err != nil {
+        return nil, err
+    }
+
+    fh.CompressedSize = uint32(min64(fh.CompressedSize64, uint32max))
+    fh.UncompressedSize = uint32(min64(fh.UncompressedSize64, uint32max))
+
+    h := &header{
+        FileHeader: fh,
+        offset:     uint64(w.cw.count),
+        raw:        true,
+    }
+    w.dir = append(w.dir, h)
+    if err := writeHeader(w.cw, h); err != nil {
+        return nil, err
+    }
+
+    if strings.HasSuffix(fh.Name, "/") {
+        w.last = nil
+        return dirWriter{}, nil
+    }
+
+    fw := &fileWriter{
+        header: h,
+        zipw:   w.cw,
+    }
+    w.last = fw
+    return fw, nil
+}
+
+// Copy copies the file f (obtained from a Reader) into w. It copies the raw
+// form directly, bypassing decompression, compression, and validation.
+func (w *Writer) Copy(f *File) error {
+    r, err := f.OpenRaw()
+    if err != nil {
+        return err
+    }
+    fw, err := w.CreateRaw(&f.FileHeader)
+    if err != nil {
+        return err
+    }
+    _, err = io.Copy(fw, r)
+    return err
+}
+
+// RegisterCompressor registers or overrides a custom compressor for a specific
+// method ID. If a compressor for a given method is not found, Writer will
+// default to looking up the compressor at the package level.
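+//
+// For example, a sketch of wiring in a Zstandard compressor for this
+// Writer only (method 93 is Zstandard's registered ZIP method ID; the
+// zstd package used here is an assumption, not an import of this file):
+//
+//    zw.RegisterCompressor(93, func(out io.Writer) (io.WriteCloser, error) {
+//        return zstd.NewWriter(out)
+//    })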
+func (w *Writer) RegisterCompressor(method uint16, comp Compressor) { + if w.compressors == nil { + w.compressors = make(map[uint16]Compressor) + } + w.compressors[method] = comp +} + +func (w *Writer) compressor(method uint16) Compressor { + comp := w.compressors[method] + if comp == nil { + comp = compressor(method) + } + return comp +} + +type dirWriter struct{} + +func (dirWriter) Write(b []byte) (int, error) { + if len(b) == 0 { + return 0, nil + } + return 0, errors.New("zip: write to directory") +} + +type fileWriter struct { + *header + zipw io.Writer + rawCount *countWriter + comp io.WriteCloser + compCount *countWriter + crc32 hash.Hash32 + closed bool +} + +func (w *fileWriter) Write(p []byte) (int, error) { + if w.closed { + return 0, errors.New("zip: write to closed file") + } + if w.raw { + return w.zipw.Write(p) + } + w.crc32.Write(p) + return w.rawCount.Write(p) +} + +func (w *fileWriter) close() error { + if w.closed { + return errors.New("zip: file closed twice") + } + w.closed = true + if w.raw { + return w.writeDataDescriptor() + } + if err := w.comp.Close(); err != nil { + return err + } + + // update FileHeader + fh := w.header.FileHeader + fh.CRC32 = w.crc32.Sum32() + fh.CompressedSize64 = uint64(w.compCount.count) + fh.UncompressedSize64 = uint64(w.rawCount.count) + + if fh.isZip64() { + fh.CompressedSize = uint32max + fh.UncompressedSize = uint32max + fh.ReaderVersion = zipVersion45 // requires 4.5 - File uses ZIP64 format extensions + } else { + fh.CompressedSize = uint32(fh.CompressedSize64) + fh.UncompressedSize = uint32(fh.UncompressedSize64) + } + + return w.writeDataDescriptor() +} + +func (w *fileWriter) writeDataDescriptor() error { + if !w.hasDataDescriptor() { + return nil + } + // Write data descriptor. This is more complicated than one would + // think, see e.g. comments in zipfile.c:putextended() and + // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=7073588. + // The approach here is to write 8 byte sizes if needed without + // adding a zip64 extra in the local header (too late anyway). + var buf []byte + if w.isZip64() { + buf = make([]byte, dataDescriptor64Len) + } else { + buf = make([]byte, dataDescriptorLen) + } + b := writeBuf(buf) + b.uint32(dataDescriptorSignature) // de-facto standard, required by OS X + b.uint32(w.CRC32) + if w.isZip64() { + b.uint64(w.CompressedSize64) + b.uint64(w.UncompressedSize64) + } else { + b.uint32(w.CompressedSize) + b.uint32(w.UncompressedSize) + } + _, err := w.zipw.Write(buf) + return err +} + +type countWriter struct { + w io.Writer + count int64 +} + +func (w *countWriter) Write(p []byte) (int, error) { + n, err := w.w.Write(p) + w.count += int64(n) + return n, err +} + +type nopCloser struct { + io.Writer +} + +func (w nopCloser) Close() error { + return nil +} + +type writeBuf []byte + +func (b *writeBuf) uint8(v uint8) { + (*b)[0] = v + *b = (*b)[1:] +} + +func (b *writeBuf) uint16(v uint16) { + binary.LittleEndian.PutUint16(*b, v) + *b = (*b)[2:] +} + +func (b *writeBuf) uint32(v uint32) { + binary.LittleEndian.PutUint32(*b, v) + *b = (*b)[4:] +} + +func (b *writeBuf) uint64(v uint64) { + binary.LittleEndian.PutUint64(*b, v) + *b = (*b)[8:] +} diff --git a/vendor/github.com/andybalholm/brotli/README.md b/vendor/github.com/andybalholm/brotli/README.md index 1ea7fdb7..00625211 100644 --- a/vendor/github.com/andybalholm/brotli/README.md +++ b/vendor/github.com/andybalholm/brotli/README.md @@ -2,6 +2,13 @@ This package is a brotli compressor and decompressor implemented in Go. 
It was translated from the reference implementation (https://github.com/google/brotli) with the `c2go` tool at https://github.com/andybalholm/c2go. +I have been working on new compression algorithms (not translated from C) +in the matchfinder package. +You can use them with the NewWriterV2 function. +Currently they give better results than the old implementation +(at least for compressing my test file, Newton’s *Opticks*) +on levels 2 to 6. + I am using it in production with https://github.com/andybalholm/redwood. API documentation is found at https://pkg.go.dev/github.com/andybalholm/brotli?tab=doc. diff --git a/vendor/github.com/andybalholm/brotli/bitwriter.go b/vendor/github.com/andybalholm/brotli/bitwriter.go new file mode 100644 index 00000000..dfc60360 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/bitwriter.go @@ -0,0 +1,56 @@ +package brotli + +/* Copyright 2010 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Write bits into a byte array. */ + +type bitWriter struct { + dst []byte + + // Data waiting to be written is the low nbits of bits. + bits uint64 + nbits uint +} + +func (w *bitWriter) writeBits(nb uint, b uint64) { + w.bits |= b << w.nbits + w.nbits += nb + if w.nbits >= 32 { + bits := w.bits + w.bits >>= 32 + w.nbits -= 32 + w.dst = append(w.dst, + byte(bits), + byte(bits>>8), + byte(bits>>16), + byte(bits>>24), + ) + } +} + +func (w *bitWriter) writeSingleBit(bit bool) { + if bit { + w.writeBits(1, 1) + } else { + w.writeBits(1, 0) + } +} + +func (w *bitWriter) jumpToByteBoundary() { + dst := w.dst + for w.nbits != 0 { + dst = append(dst, byte(w.bits)) + w.bits >>= 8 + if w.nbits > 8 { // Avoid underflow + w.nbits -= 8 + } else { + w.nbits = 0 + } + } + w.bits = 0 + w.dst = dst +} diff --git a/vendor/github.com/andybalholm/brotli/brotli_bit_stream.go b/vendor/github.com/andybalholm/brotli/brotli_bit_stream.go index 7acfb180..ee655298 100644 --- a/vendor/github.com/andybalholm/brotli/brotli_bit_stream.go +++ b/vendor/github.com/andybalholm/brotli/brotli_bit_stream.go @@ -7,12 +7,18 @@ import ( const maxHuffmanTreeSize = (2*numCommandSymbols + 1) -/* The maximum size of Huffman dictionary for distances assuming that - NPOSTFIX = 0 and NDIRECT = 0. */ +/* +The maximum size of Huffman dictionary for distances assuming that + + NPOSTFIX = 0 and NDIRECT = 0. +*/ const maxSimpleDistanceAlphabetSize = 140 -/* Represents the range of values belonging to a prefix code: - [offset, offset + 2^nbits) */ +/* +Represents the range of values belonging to a prefix code: + + [offset, offset + 2^nbits) +*/ type prefixCodeRange struct { offset uint32 nbits uint32 @@ -96,9 +102,12 @@ func nextBlockTypeCode(calculator *blockTypeCodeCalculator, type_ byte) uint { return type_code } -/* |nibblesbits| represents the 2 bits to encode MNIBBLES (0-3) - REQUIRES: length > 0 - REQUIRES: length <= (1 << 24) */ +/* +|nibblesbits| represents the 2 bits to encode MNIBBLES (0-3) + + REQUIRES: length > 0 + REQUIRES: length <= (1 << 24) +*/ func encodeMlen(length uint, bits *uint64, numbits *uint, nibblesbits *uint64) { var lg uint if length == 1 { @@ -132,8 +141,11 @@ func storeCommandExtra(cmd *command, storage_ix *uint, storage []byte) { writeBits(uint(insnumextra+getCopyExtra(copycode)), bits, storage_ix, storage) } -/* Data structure that stores almost everything that is needed to encode each - block switch command. 
*/ +/* +Data structure that stores almost everything that is needed to encode each + + block switch command. +*/ type blockSplitCode struct { type_code_calculator blockTypeCodeCalculator type_depths [maxBlockTypeSymbols]byte @@ -154,9 +166,12 @@ func storeVarLenUint8(n uint, storage_ix *uint, storage []byte) { } } -/* Stores the compressed meta-block header. - REQUIRES: length > 0 - REQUIRES: length <= (1 << 24) */ +/* +Stores the compressed meta-block header. + + REQUIRES: length > 0 + REQUIRES: length <= (1 << 24) +*/ func storeCompressedMetaBlockHeader(is_final_block bool, length uint, storage_ix *uint, storage []byte) { var lenbits uint64 var nlenbits uint @@ -186,9 +201,12 @@ func storeCompressedMetaBlockHeader(is_final_block bool, length uint, storage_ix } } -/* Stores the uncompressed meta-block header. - REQUIRES: length > 0 - REQUIRES: length <= (1 << 24) */ +/* +Stores the uncompressed meta-block header. + + REQUIRES: length > 0 + REQUIRES: length <= (1 << 24) +*/ func storeUncompressedMetaBlockHeader(length uint, storage_ix *uint, storage []byte) { var lenbits uint64 var nlenbits uint @@ -312,8 +330,11 @@ func storeSimpleHuffmanTree(depths []byte, symbols []uint, num_symbols uint, max } } -/* num = alphabet size - depths = symbol depths */ +/* +num = alphabet size + + depths = symbol depths +*/ func storeHuffmanTree(depths []byte, num uint, tree []huffmanTree, storage_ix *uint, storage []byte) { var huffman_tree [numCommandSymbols]byte var huffman_tree_extra_bits [numCommandSymbols]byte @@ -367,8 +388,11 @@ func storeHuffmanTree(depths []byte, num uint, tree []huffmanTree, storage_ix *u storeHuffmanTreeToBitMask(huffman_tree_size, huffman_tree[:], huffman_tree_extra_bits[:], code_length_bitdepth[:], code_length_bitdepth_symbols[:], storage_ix, storage) } -/* Builds a Huffman tree from histogram[0:length] into depth[0:length] and - bits[0:length] and stores the encoded tree to the bit stream. */ +/* +Builds a Huffman tree from histogram[0:length] into depth[0:length] and + + bits[0:length] and stores the encoded tree to the bit stream. 
+*/ func buildAndStoreHuffmanTree(histogram []uint32, histogram_length uint, alphabet_size uint, tree []huffmanTree, depth []byte, bits []uint16, storage_ix *uint, storage []byte) { var count uint = 0 var s4 = [4]uint{0} @@ -623,6 +647,203 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_ } } +func buildAndStoreHuffmanTreeFastBW(histogram []uint32, histogram_total uint, max_bits uint, depth []byte, bits []uint16, bw *bitWriter) { + var count uint = 0 + var symbols = [4]uint{0} + var length uint = 0 + var total uint = histogram_total + for total != 0 { + if histogram[length] != 0 { + if count < 4 { + symbols[count] = length + } + + count++ + total -= uint(histogram[length]) + } + + length++ + } + + if count <= 1 { + bw.writeBits(4, 1) + bw.writeBits(max_bits, uint64(symbols[0])) + depth[symbols[0]] = 0 + bits[symbols[0]] = 0 + return + } + + for i := 0; i < int(length); i++ { + depth[i] = 0 + } + { + var max_tree_size uint = 2*length + 1 + tree, _ := huffmanTreePool.Get().(*[]huffmanTree) + if tree == nil || cap(*tree) < int(max_tree_size) { + tmp := make([]huffmanTree, max_tree_size) + tree = &tmp + } else { + *tree = (*tree)[:max_tree_size] + } + var count_limit uint32 + for count_limit = 1; ; count_limit *= 2 { + var node int = 0 + var l uint + for l = length; l != 0; { + l-- + if histogram[l] != 0 { + if histogram[l] >= count_limit { + initHuffmanTree(&(*tree)[node:][0], histogram[l], -1, int16(l)) + } else { + initHuffmanTree(&(*tree)[node:][0], count_limit, -1, int16(l)) + } + + node++ + } + } + { + var n int = node + /* Points to the next leaf node. */ /* Points to the next non-leaf node. */ + var sentinel huffmanTree + var i int = 0 + var j int = n + 1 + var k int + + sortHuffmanTreeItems(*tree, uint(n), huffmanTreeComparator(sortHuffmanTree1)) + + /* The nodes are: + [0, n): the sorted leaf nodes that we start with. + [n]: we add a sentinel here. + [n + 1, 2n): new parent nodes are added here, starting from + (n+1). These are naturally in ascending order. + [2n]: we add a sentinel at the end as well. + There will be (2n+1) elements at the end. */ + initHuffmanTree(&sentinel, math.MaxUint32, -1, -1) + + (*tree)[node] = sentinel + node++ + (*tree)[node] = sentinel + node++ + + for k = n - 1; k > 0; k-- { + var left int + var right int + if (*tree)[i].total_count_ <= (*tree)[j].total_count_ { + left = i + i++ + } else { + left = j + j++ + } + + if (*tree)[i].total_count_ <= (*tree)[j].total_count_ { + right = i + i++ + } else { + right = j + j++ + } + + /* The sentinel node becomes the parent node. */ + (*tree)[node-1].total_count_ = (*tree)[left].total_count_ + (*tree)[right].total_count_ + + (*tree)[node-1].index_left_ = int16(left) + (*tree)[node-1].index_right_or_value_ = int16(right) + + /* Add back the last sentinel node. */ + (*tree)[node] = sentinel + node++ + } + + if setDepth(2*n-1, *tree, depth, 14) { + /* We need to pack the Huffman tree in 14 bits. If this was not + successful, add fake entities to the lowest values and retry. 
*/ + break + } + } + } + + huffmanTreePool.Put(tree) + } + + convertBitDepthsToSymbols(depth, length, bits) + if count <= 4 { + var i uint + + /* value of 1 indicates a simple Huffman code */ + bw.writeBits(2, 1) + + bw.writeBits(2, uint64(count)-1) /* NSYM - 1 */ + + /* Sort */ + for i = 0; i < count; i++ { + var j uint + for j = i + 1; j < count; j++ { + if depth[symbols[j]] < depth[symbols[i]] { + var tmp uint = symbols[j] + symbols[j] = symbols[i] + symbols[i] = tmp + } + } + } + + if count == 2 { + bw.writeBits(max_bits, uint64(symbols[0])) + bw.writeBits(max_bits, uint64(symbols[1])) + } else if count == 3 { + bw.writeBits(max_bits, uint64(symbols[0])) + bw.writeBits(max_bits, uint64(symbols[1])) + bw.writeBits(max_bits, uint64(symbols[2])) + } else { + bw.writeBits(max_bits, uint64(symbols[0])) + bw.writeBits(max_bits, uint64(symbols[1])) + bw.writeBits(max_bits, uint64(symbols[2])) + bw.writeBits(max_bits, uint64(symbols[3])) + + /* tree-select */ + bw.writeSingleBit(depth[symbols[0]] == 1) + } + } else { + var previous_value byte = 8 + var i uint + + /* Complex Huffman Tree */ + storeStaticCodeLengthCodeBW(bw) + + /* Actual RLE coding. */ + for i = 0; i < length; { + var value byte = depth[i] + var reps uint = 1 + var k uint + for k = i + 1; k < length && depth[k] == value; k++ { + reps++ + } + + i += reps + if value == 0 { + bw.writeBits(uint(kZeroRepsDepth[reps]), kZeroRepsBits[reps]) + } else { + if previous_value != value { + bw.writeBits(uint(kCodeLengthDepth[value]), uint64(kCodeLengthBits[value])) + reps-- + } + + if reps < 3 { + for reps != 0 { + reps-- + bw.writeBits(uint(kCodeLengthDepth[value]), uint64(kCodeLengthBits[value])) + } + } else { + reps -= 3 + bw.writeBits(uint(kNonZeroRepsDepth[reps]), kNonZeroRepsBits[reps]) + } + + previous_value = value + } + } + } +} + func indexOf(v []byte, v_size uint, value byte) uint { var i uint = 0 for ; i < v_size; i++ { @@ -674,12 +895,15 @@ func moveToFrontTransform(v_in []uint32, v_size uint, v_out []uint32) { } } -/* Finds runs of zeros in v[0..in_size) and replaces them with a prefix code of - the run length plus extra bits (lower 9 bits is the prefix code and the rest - are the extra bits). Non-zero values in v[] are shifted by - *max_length_prefix. Will not create prefix codes bigger than the initial - value of *max_run_length_prefix. The prefix code of run length L is simply - Log2Floor(L) and the number of extra bits is the same as the prefix code. */ +/* +Finds runs of zeros in v[0..in_size) and replaces them with a prefix code of + + the run length plus extra bits (lower 9 bits is the prefix code and the rest + are the extra bits). Non-zero values in v[] are shifted by + *max_length_prefix. Will not create prefix codes bigger than the initial + value of *max_run_length_prefix. The prefix code of run length L is simply + Log2Floor(L) and the number of extra bits is the same as the prefix code. +*/ func runLengthCodeZeros(in_size uint, v []uint32, out_size *uint, max_run_length_prefix *uint32) { var max_reps uint32 = 0 var i uint @@ -799,8 +1023,11 @@ func storeBlockSwitch(code *blockSplitCode, block_len uint32, block_type byte, i writeBits(uint(len_nextra), uint64(len_extra), storage_ix, storage) } -/* Builds a BlockSplitCode data structure from the block split given by the - vector of block types and block lengths and stores it to the bit stream. */ +/* +Builds a BlockSplitCode data structure from the block split given by the + + vector of block types and block lengths and stores it to the bit stream. 
+*/ func buildAndStoreBlockSplitCode(types []byte, lengths []uint32, num_blocks uint, num_types uint, tree []huffmanTree, code *blockSplitCode, storage_ix *uint, storage []byte) { var type_histo [maxBlockTypeSymbols]uint32 var length_histo [numBlockLenSymbols]uint32 @@ -919,14 +1146,20 @@ func cleanupBlockEncoder(self *blockEncoder) { blockEncoderPool.Put(self) } -/* Creates entropy codes of block lengths and block types and stores them - to the bit stream. */ +/* +Creates entropy codes of block lengths and block types and stores them + + to the bit stream. +*/ func buildAndStoreBlockSwitchEntropyCodes(self *blockEncoder, tree []huffmanTree, storage_ix *uint, storage []byte) { buildAndStoreBlockSplitCode(self.block_types_, self.block_lengths_, self.num_blocks_, self.num_block_types_, tree, &self.block_split_code_, storage_ix, storage) } -/* Stores the next symbol with the entropy code of the current block type. - Updates the block type and block length at block boundaries. */ +/* +Stores the next symbol with the entropy code of the current block type. + + Updates the block type and block length at block boundaries. +*/ func storeSymbol(self *blockEncoder, symbol uint, storage_ix *uint, storage []byte) { if self.block_len_ == 0 { self.block_ix_++ @@ -945,9 +1178,12 @@ func storeSymbol(self *blockEncoder, symbol uint, storage_ix *uint, storage []by } } -/* Stores the next symbol with the entropy code of the current block type and - context value. - Updates the block type and block length at block boundaries. */ +/* +Stores the next symbol with the entropy code of the current block type and + + context value. + Updates the block type and block length at block boundaries. +*/ func storeSymbolWithContext(self *blockEncoder, symbol uint, context uint, context_map []uint32, storage_ix *uint, storage []byte, context_bits uint) { if self.block_len_ == 0 { self.block_ix_++ @@ -1268,8 +1504,11 @@ func storeMetaBlockFast(input []byte, start_pos uint, length uint, mask uint, is } } -/* This is for storing uncompressed blocks (simple raw storage of - bytes-as-bytes). */ +/* +This is for storing uncompressed blocks (simple raw storage of + + bytes-as-bytes). +*/ func storeUncompressedMetaBlock(is_final_block bool, input []byte, position uint, mask uint, len uint, storage_ix *uint, storage []byte) { var masked_pos uint = position & mask storeUncompressedMetaBlockHeader(uint(len), storage_ix, storage) diff --git a/vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go b/vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go index 172dc7f4..79f9c7fd 100644 --- a/vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go +++ b/vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go @@ -39,8 +39,11 @@ func isMatch1(p1 []byte, p2 []byte, length uint) bool { return p1[4] == p2[4] && p1[5] == p2[5] } -/* Builds a command and distance prefix code (each 64 symbols) into "depth" and - "bits" based on "histogram" and stores it into the bit stream. */ +/* +Builds a command and distance prefix code (each 64 symbols) into "depth" and + + "bits" based on "histogram" and stores it into the bit stream. 
+*/ func buildAndStoreCommandPrefixCode(histogram []uint32, depth []byte, bits []uint16, storage_ix *uint, storage []byte) { var tree [129]huffmanTree var cmd_depth = [numCommandSymbols]byte{0} @@ -216,6 +219,25 @@ func storeMetaBlockHeader(len uint, is_uncompressed bool, storage_ix *uint, stor writeSingleBit(is_uncompressed, storage_ix, storage) } +func storeMetaBlockHeaderBW(len uint, is_uncompressed bool, bw *bitWriter) { + var nibbles uint = 6 + + /* ISLAST */ + bw.writeBits(1, 0) + + if len <= 1<<16 { + nibbles = 4 + } else if len <= 1<<20 { + nibbles = 5 + } + + bw.writeBits(2, uint64(nibbles)-4) + bw.writeBits(nibbles*4, uint64(len)-1) + + /* ISUNCOMPRESSED */ + bw.writeSingleBit(is_uncompressed) +} + func createCommands(input []byte, block_size uint, input_size uint, base_ip_ptr []byte, table []int, table_bits uint, min_match uint, literals *[]byte, commands *[]uint32) { var ip int = 0 var shift uint = 64 - table_bits @@ -710,19 +732,22 @@ func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, co } } -/* Compresses "input" string to the "*storage" buffer as one or more complete - meta-blocks, and updates the "*storage_ix" bit position. +/* +Compresses "input" string to the "*storage" buffer as one or more complete - If "is_last" is 1, emits an additional empty last meta-block. + meta-blocks, and updates the "*storage_ix" bit position. - REQUIRES: "input_size" is greater than zero, or "is_last" is 1. - REQUIRES: "input_size" is less or equal to maximal metablock size (1 << 24). - REQUIRES: "command_buf" and "literal_buf" point to at least - kCompressFragmentTwoPassBlockSize long arrays. - REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero. - REQUIRES: "table_size" is a power of two - OUTPUT: maximal copy distance <= |input_size| - OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */ + If "is_last" is 1, emits an additional empty last meta-block. + + REQUIRES: "input_size" is greater than zero, or "is_last" is 1. + REQUIRES: "input_size" is less or equal to maximal metablock size (1 << 24). + REQUIRES: "command_buf" and "literal_buf" point to at least + kCompressFragmentTwoPassBlockSize long arrays. + REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero. + REQUIRES: "table_size" is a power of two + OUTPUT: maximal copy distance <= |input_size| + OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) +*/ func compressFragmentTwoPass(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_size uint, storage_ix *uint, storage []byte) { var initial_storage_ix uint = *storage_ix var table_bits uint = uint(log2FloorNonZero(table_size)) diff --git a/vendor/github.com/andybalholm/brotli/decode.go b/vendor/github.com/andybalholm/brotli/decode.go index 6a73b88a..9d9513b7 100644 --- a/vendor/github.com/andybalholm/brotli/decode.go +++ b/vendor/github.com/andybalholm/brotli/decode.go @@ -1304,26 +1304,21 @@ func wrapRingBuffer(s *Reader) { Last two bytes of ring-buffer are initialized to 0, so context calculation could be done uniformly for the first two and all other positions. */ func ensureRingBuffer(s *Reader) bool { - var old_ringbuffer []byte = s.ringbuffer + var old_ringbuffer []byte if s.ringbuffer_size == s.new_ringbuffer_size { return true } - - s.ringbuffer = make([]byte, uint(s.new_ringbuffer_size)+uint(kRingBufferWriteAheadSlack)) - if s.ringbuffer == nil { - /* Restore previous value. 
*/ - s.ringbuffer = old_ringbuffer - - return false + spaceNeeded := int(s.new_ringbuffer_size) + int(kRingBufferWriteAheadSlack) + if len(s.ringbuffer) < spaceNeeded { + old_ringbuffer = s.ringbuffer + s.ringbuffer = make([]byte, spaceNeeded) } s.ringbuffer[s.new_ringbuffer_size-2] = 0 s.ringbuffer[s.new_ringbuffer_size-1] = 0 - if !(old_ringbuffer == nil) { + if old_ringbuffer != nil { copy(s.ringbuffer, old_ringbuffer[:uint(s.pos)]) - - old_ringbuffer = nil } s.ringbuffer_size = s.new_ringbuffer_size diff --git a/vendor/github.com/andybalholm/brotli/encoder.go b/vendor/github.com/andybalholm/brotli/encoder.go new file mode 100644 index 00000000..19283825 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/encoder.go @@ -0,0 +1,177 @@ +package brotli + +import "github.com/andybalholm/brotli/matchfinder" + +// An Encoder implements the matchfinder.Encoder interface, writing in Brotli format. +type Encoder struct { + wroteHeader bool + bw bitWriter + distCache []distanceCode +} + +func (e *Encoder) Reset() { + e.wroteHeader = false + e.bw = bitWriter{} +} + +func (e *Encoder) Encode(dst []byte, src []byte, matches []matchfinder.Match, lastBlock bool) []byte { + e.bw.dst = dst + if !e.wroteHeader { + e.bw.writeBits(4, 15) + e.wroteHeader = true + } + + if len(src) == 0 { + if lastBlock { + e.bw.writeBits(2, 3) // islast + isempty + e.bw.jumpToByteBoundary() + return e.bw.dst + } + return dst + } + + var literalHisto [256]uint32 + var commandHisto [704]uint32 + var distanceHisto [64]uint32 + literalCount := 0 + commandCount := 0 + distanceCount := 0 + + if len(e.distCache) < len(matches) { + e.distCache = make([]distanceCode, len(matches)) + } + + // first pass: build the histograms + pos := 0 + + // d is the ring buffer of the last 4 distances. + d := [4]int{-10, -10, -10, -10} + for i, m := range matches { + if m.Unmatched > 0 { + for _, c := range src[pos : pos+m.Unmatched] { + literalHisto[c]++ + } + literalCount += m.Unmatched + } + + insertCode := getInsertLengthCode(uint(m.Unmatched)) + copyCode := getCopyLengthCode(uint(m.Length)) + if m.Length == 0 { + // If the stream ends with unmatched bytes, we need a dummy copy length. + copyCode = 2 + } + command := combineLengthCodes(insertCode, copyCode, false) + commandHisto[command]++ + commandCount++ + + if command >= 128 && m.Length != 0 { + var distCode distanceCode + switch m.Distance { + case d[3]: + distCode.code = 0 + case d[2]: + distCode.code = 1 + case d[1]: + distCode.code = 2 + case d[0]: + distCode.code = 3 + case d[3] - 1: + distCode.code = 4 + case d[3] + 1: + distCode.code = 5 + case d[3] - 2: + distCode.code = 6 + case d[3] + 2: + distCode.code = 7 + case d[3] - 3: + distCode.code = 8 + case d[3] + 3: + distCode.code = 9 + + // In my testing, codes 10–15 actually reduced the compression ratio. 
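+                // (Codes 0-3 reuse one of the last four distances, and
+                // codes 4-9 encode a distance within ±3 of the most
+                // recent one, as laid out in the cases above.)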
+ + default: + distCode = getDistanceCode(m.Distance) + } + e.distCache[i] = distCode + distanceHisto[distCode.code]++ + distanceCount++ + if distCode.code != 0 { + d[0], d[1], d[2], d[3] = d[1], d[2], d[3], m.Distance + } + } + + pos += m.Unmatched + m.Length + } + + storeMetaBlockHeaderBW(uint(len(src)), false, &e.bw) + e.bw.writeBits(13, 0) + + var literalDepths [256]byte + var literalBits [256]uint16 + buildAndStoreHuffmanTreeFastBW(literalHisto[:], uint(literalCount), 8, literalDepths[:], literalBits[:], &e.bw) + + var commandDepths [704]byte + var commandBits [704]uint16 + buildAndStoreHuffmanTreeFastBW(commandHisto[:], uint(commandCount), 10, commandDepths[:], commandBits[:], &e.bw) + + var distanceDepths [64]byte + var distanceBits [64]uint16 + buildAndStoreHuffmanTreeFastBW(distanceHisto[:], uint(distanceCount), 6, distanceDepths[:], distanceBits[:], &e.bw) + + pos = 0 + for i, m := range matches { + insertCode := getInsertLengthCode(uint(m.Unmatched)) + copyCode := getCopyLengthCode(uint(m.Length)) + if m.Length == 0 { + // If the stream ends with unmatched bytes, we need a dummy copy length. + copyCode = 2 + } + command := combineLengthCodes(insertCode, copyCode, false) + e.bw.writeBits(uint(commandDepths[command]), uint64(commandBits[command])) + if kInsExtra[insertCode] > 0 { + e.bw.writeBits(uint(kInsExtra[insertCode]), uint64(m.Unmatched)-uint64(kInsBase[insertCode])) + } + if kCopyExtra[copyCode] > 0 { + e.bw.writeBits(uint(kCopyExtra[copyCode]), uint64(m.Length)-uint64(kCopyBase[copyCode])) + } + + if m.Unmatched > 0 { + for _, c := range src[pos : pos+m.Unmatched] { + e.bw.writeBits(uint(literalDepths[c]), uint64(literalBits[c])) + } + } + + if command >= 128 && m.Length != 0 { + distCode := e.distCache[i] + e.bw.writeBits(uint(distanceDepths[distCode.code]), uint64(distanceBits[distCode.code])) + if distCode.nExtra > 0 { + e.bw.writeBits(distCode.nExtra, distCode.extraBits) + } + } + + pos += m.Unmatched + m.Length + } + + if lastBlock { + e.bw.writeBits(2, 3) // islast + isempty + e.bw.jumpToByteBoundary() + } + return e.bw.dst +} + +type distanceCode struct { + code int + nExtra uint + extraBits uint64 +} + +func getDistanceCode(distance int) distanceCode { + d := distance + 3 + nbits := log2FloorNonZero(uint(d)) - 1 + prefix := (d >> nbits) & 1 + offset := (2 + prefix) << nbits + distcode := int(2*(nbits-1)) + prefix + 16 + extra := d - offset + return distanceCode{distcode, uint(nbits), uint64(extra)} +} diff --git a/vendor/github.com/andybalholm/brotli/entropy_encode_static.go b/vendor/github.com/andybalholm/brotli/entropy_encode_static.go index 5ddf3fcb..294aff4f 100644 --- a/vendor/github.com/andybalholm/brotli/entropy_encode_static.go +++ b/vendor/github.com/andybalholm/brotli/entropy_encode_static.go @@ -782,6 +782,11 @@ func storeStaticCodeLengthCode(storage_ix *uint, storage []byte) { writeBits(40, 0x0000FF55555554, storage_ix, storage) } +func storeStaticCodeLengthCodeBW(bw *bitWriter) { + bw.writeBits(32, 0x55555554) + bw.writeBits(8, 0xFF) +} + var kZeroRepsBits = [numCommandSymbols]uint64{ 0x00000000, 0x00000000, diff --git a/vendor/github.com/andybalholm/brotli/http.go b/vendor/github.com/andybalholm/brotli/http.go index 1e981963..3d3a8a06 100644 --- a/vendor/github.com/andybalholm/brotli/http.go +++ b/vendor/github.com/andybalholm/brotli/http.go @@ -11,15 +11,7 @@ import ( // the Accept-Encoding header, sets the Content-Encoding header, and returns a // WriteCloser that implements that compression. 
The Close method must be called // before the current HTTP handler returns. -// -// Due to https://github.com/golang/go/issues/31753, the response will not be -// compressed unless you set a Content-Type header before you call -// HTTPCompressor. func HTTPCompressor(w http.ResponseWriter, r *http.Request) io.WriteCloser { - if w.Header().Get("Content-Type") == "" { - return nopCloser{w} - } - if w.Header().Get("Vary") == "" { w.Header().Set("Vary", "Accept-Encoding") } @@ -28,7 +20,7 @@ func HTTPCompressor(w http.ResponseWriter, r *http.Request) io.WriteCloser { switch encoding { case "br": w.Header().Set("Content-Encoding", "br") - return NewWriter(w) + return NewWriterV2(w, DefaultCompression) case "gzip": w.Header().Set("Content-Encoding", "gzip") return gzip.NewWriter(w) diff --git a/vendor/github.com/andybalholm/brotli/matchfinder/emitter.go b/vendor/github.com/andybalholm/brotli/matchfinder/emitter.go new file mode 100644 index 00000000..507d1cae --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/matchfinder/emitter.go @@ -0,0 +1,34 @@ +package matchfinder + +// An absoluteMatch is like a Match, but it stores indexes into the byte +// stream instead of lengths. +type absoluteMatch struct { + // Start is the index of the first byte. + Start int + + // End is the index of the byte after the last byte + // (so that End - Start = Length). + End int + + // Match is the index of the previous data that matches + // (Start - Match = Distance). + Match int +} + +// A matchEmitter manages the output of matches for a MatchFinder. +type matchEmitter struct { + // Dst is the destination slice that Matches are added to. + Dst []Match + + // NextEmit is the index of the next byte to emit. + NextEmit int +} + +func (e *matchEmitter) emit(m absoluteMatch) { + e.Dst = append(e.Dst, Match{ + Unmatched: m.Start - e.NextEmit, + Length: m.End - m.Start, + Distance: m.Start - m.Match, + }) + e.NextEmit = m.End +} diff --git a/vendor/github.com/andybalholm/brotli/matchfinder/m0.go b/vendor/github.com/andybalholm/brotli/matchfinder/m0.go new file mode 100644 index 00000000..773b7c49 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/matchfinder/m0.go @@ -0,0 +1,169 @@ +package matchfinder + +import ( + "encoding/binary" +) + +// M0 is an implementation of the MatchFinder interface based +// on the algorithm used by snappy, but modified to be more like the algorithm +// used by compression level 0 of the brotli reference implementation. +// +// It has a maximum block size of 65536 bytes. +type M0 struct { + // Lazy turns on "lazy matching," for higher compression but less speed. + Lazy bool + + MaxDistance int + MaxLength int +} + +func (M0) Reset() {} + +const ( + m0HashLen = 5 + + m0TableBits = 14 + m0TableSize = 1 << m0TableBits + m0Shift = 32 - m0TableBits + // m0TableMask is redundant, but helps the compiler eliminate bounds + // checks. + m0TableMask = m0TableSize - 1 +) + +func (m M0) hash(data uint64) uint64 { + hash := (data << (64 - 8*m0HashLen)) * hashMul64 + return hash >> (64 - m0TableBits) +} + +// FindMatches looks for matches in src, appends them to dst, and returns dst. +// src must not be longer than 65536 bytes. 
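+//
+// A minimal sketch of calling it directly (the 64 KiB limit exists
+// because positions are stored in the hash table as uint16 values):
+//
+//    var mf M0
+//    matches := mf.FindMatches(nil, block) // block is at most 65536 bytes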
+func (m M0) FindMatches(dst []Match, src []byte) []Match { + const inputMargin = 16 - 1 + const minNonLiteralBlockSize = 1 + 1 + inputMargin + + if len(src) < minNonLiteralBlockSize { + dst = append(dst, Match{ + Unmatched: len(src), + }) + return dst + } + if len(src) > 65536 { + panic("block too long") + } + + var table [m0TableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := m.hash(binary.LittleEndian.Uint64(src[s:])) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&m0TableMask]) + table[nextHash&m0TableMask] = uint16(s) + nextHash = m.hash(binary.LittleEndian.Uint64(src[nextS:])) + if m.MaxDistance != 0 && s-candidate > m.MaxDistance { + continue + } + if binary.LittleEndian.Uint32(src[s:]) == binary.LittleEndian.Uint32(src[candidate:]) { + break + } + } + + // Invariant: we have a 4-byte match at s. + base := s + s = extendMatch(src, candidate+4, s+4) + + origBase := base + if m.Lazy && base+1 < sLimit { + newBase := base + 1 + h := m.hash(binary.LittleEndian.Uint64(src[newBase:])) + newCandidate := int(table[h&m0TableMask]) + table[h&m0TableMask] = uint16(newBase) + okDistance := true + if m.MaxDistance != 0 && newBase-newCandidate > m.MaxDistance { + okDistance = false + } + if okDistance && binary.LittleEndian.Uint32(src[newBase:]) == binary.LittleEndian.Uint32(src[newCandidate:]) { + newS := extendMatch(src, newCandidate+4, newBase+4) + if newS-newBase > s-base+1 { + s = newS + base = newBase + candidate = newCandidate + } + } + } + + if m.MaxLength != 0 && s-base > m.MaxLength { + s = base + m.MaxLength + } + dst = append(dst, Match{ + Unmatched: base - nextEmit, + Length: s - base, + Distance: base - candidate, + }) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if m.Lazy { + // If lazy matching is enabled, we update the hash table for + // every byte in the match. 
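+            // (Only the interior positions are handled here: origBase and
+            // origBase+1 were stored when the match was found, and s-1 and
+            // s are taken care of after this block.)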
+            for i := origBase + 2; i < s-1; i++ {
+                x := binary.LittleEndian.Uint64(src[i:])
+                table[m.hash(x)&m0TableMask] = uint16(i)
+            }
+        }
+
+        // We could immediately start working at s now, but to improve
+        // compression we first update the hash table at s-1 and at s.
+        x := binary.LittleEndian.Uint64(src[s-1:])
+        prevHash := m.hash(x >> 0)
+        table[prevHash&m0TableMask] = uint16(s - 1)
+        nextHash = m.hash(x >> 8)
+    }
+
+emitRemainder:
+    if nextEmit < len(src) {
+        dst = append(dst, Match{
+            Unmatched: len(src) - nextEmit,
+        })
+    }
+    return dst
+}
diff --git a/vendor/github.com/andybalholm/brotli/matchfinder/m4.go b/vendor/github.com/andybalholm/brotli/matchfinder/m4.go
new file mode 100644
index 00000000..81894725
--- /dev/null
+++ b/vendor/github.com/andybalholm/brotli/matchfinder/m4.go
@@ -0,0 +1,308 @@
+package matchfinder
+
+import (
+    "encoding/binary"
+    "math/bits"
+    "runtime"
+)
+
+// M4 is an implementation of the MatchFinder
+// interface that uses a hash table to find matches,
+// optional match chains,
+// and the advanced parsing technique from
+// https://fastcompression.blogspot.com/2011/12/advanced-parsing-strategies.html.
+type M4 struct {
+    // Lazy turns on "lazy matching," for higher compression but less speed.
+    // MaxDistance is the maximum distance (in bytes) to look back for
+    // a match. The default is 65535.
+    MaxDistance int
+
+    // MinLength is the length of the shortest match to return.
+    // The default is 4.
+    MinLength int
+
+    // HashLen is the number of bytes to use to calculate the hashes.
+    // The maximum is 8 and the default is 6.
+    HashLen int
+
+    // TableBits is the number of bits in the hash table indexes.
+    // The default is 17 (128K entries).
+    TableBits int
+
+    // ChainLength is how many entries to search on the "match chain" of older
+    // locations with the same hash as the current location.
+    ChainLength int
+
+    // DistanceBitCost is used when comparing two matches to see
+    // which is better. The comparison is primarily based on the length
+    // of the matches, but it can also take the distance into account,
+    // in terms of the number of bits needed to represent the distance.
+    // One byte of length is given a score of 256, so 32 (256/8) would
+    // be a reasonable first guess for the value of one bit.
+    // (The default is 0, which bases the comparison solely on length.)
+    DistanceBitCost int
+
+    table []uint32
+    chain []uint16
+
+    history []byte
+}
+
+func (q *M4) Reset() {
+    for i := range q.table {
+        q.table[i] = 0
+    }
+    q.history = q.history[:0]
+    q.chain = q.chain[:0]
+}
+
+func (q *M4) score(m absoluteMatch) int {
+    return (m.End-m.Start)*256 + (bits.LeadingZeros32(uint32(m.Start-m.Match))-32)*q.DistanceBitCost
+}
+
+func (q *M4) FindMatches(dst []Match, src []byte) []Match {
+    if q.MaxDistance == 0 {
+        q.MaxDistance = 65535
+    }
+    if q.MinLength == 0 {
+        q.MinLength = 4
+    }
+    if q.HashLen == 0 {
+        q.HashLen = 6
+    }
+    if q.TableBits == 0 {
+        q.TableBits = 17
+    }
+    if len(q.table) < 1<<q.TableBits {
+        q.table = make([]uint32, 1<<q.TableBits)
+    }
+
+    e := matchEmitter{Dst: dst}
+
+    if len(q.history) > q.MaxDistance*2 {
+        // Trim down the history buffer.
+        delta := len(q.history) - q.MaxDistance
+        copy(q.history, q.history[delta:])
+        q.history = q.history[:q.MaxDistance]
+        if q.ChainLength > 0 {
+            q.chain = q.chain[:q.MaxDistance]
+        }
+
+        for i, v := range q.table {
+            newV := int(v) - delta
+            if newV < 0 {
+                newV = 0
+            }
+            q.table[i] = uint32(newV)
+        }
+    }
+
+    // Append src to the history buffer.
+    e.NextEmit = len(q.history)
+    q.history = append(q.history, src...)
+    if q.ChainLength > 0 {
+        q.chain = append(q.chain, make([]uint16, len(src))...)
+ } + src = q.history + + // matches stores the matches that have been found but not emitted, + // in reverse order. (matches[0] is the most recent one.) + var matches [3]absoluteMatch + for i := e.NextEmit; i < len(src)-7; i++ { + if matches[0] != (absoluteMatch{}) && i >= matches[0].End { + // We have found some matches, and we're far enough along that we probably + // won't find overlapping matches, so we might as well emit them. + if matches[1] != (absoluteMatch{}) { + if matches[1].End > matches[0].Start { + matches[1].End = matches[0].Start + } + if matches[1].End-matches[1].Start >= q.MinLength && q.score(matches[1]) > 0 { + e.emit(matches[1]) + } + } + e.emit(matches[0]) + matches = [3]absoluteMatch{} + } + + // Calculate and store the hash. + h := ((binary.LittleEndian.Uint64(src[i:]) & (1<<(8*q.HashLen) - 1)) * hashMul64) >> (64 - q.TableBits) + candidate := int(q.table[h]) + q.table[h] = uint32(i) + if q.ChainLength > 0 && candidate != 0 { + delta := i - candidate + if delta < 1<<16 { + q.chain[i] = uint16(delta) + } + } + + if i < matches[0].End && i != matches[0].End+2-q.HashLen { + continue + } + if candidate == 0 || i-candidate > q.MaxDistance { + continue + } + + // Look for a match. + var currentMatch absoluteMatch + + if binary.LittleEndian.Uint32(src[candidate:]) == binary.LittleEndian.Uint32(src[i:]) { + m := extendMatch2(src, i, candidate, e.NextEmit) + if m.End-m.Start > q.MinLength && q.score(m) > 0 { + currentMatch = m + } + } + + for j := 0; j < q.ChainLength; j++ { + delta := q.chain[candidate] + if delta == 0 { + break + } + candidate -= int(delta) + if candidate <= 0 || i-candidate > q.MaxDistance { + break + } + if binary.LittleEndian.Uint32(src[candidate:]) == binary.LittleEndian.Uint32(src[i:]) { + m := extendMatch2(src, i, candidate, e.NextEmit) + if m.End-m.Start > q.MinLength && q.score(m) > q.score(currentMatch) { + currentMatch = m + } + } + } + + if currentMatch.End-currentMatch.Start < q.MinLength { + continue + } + + overlapPenalty := 0 + if matches[0] != (absoluteMatch{}) { + overlapPenalty = 275 + if currentMatch.Start <= matches[1].End { + // This match would completely replace the previous match, + // so there is no penalty for overlap. + overlapPenalty = 0 + } + } + + if q.score(currentMatch) <= q.score(matches[0])+overlapPenalty { + continue + } + + matches = [3]absoluteMatch{ + currentMatch, + matches[0], + matches[1], + } + + if matches[2] == (absoluteMatch{}) { + continue + } + + // We have three matches, so it's time to emit one and/or eliminate one. + switch { + case matches[0].Start < matches[2].End: + // The first and third matches overlap; discard the one in between. + matches = [3]absoluteMatch{ + matches[0], + matches[2], + absoluteMatch{}, + } + + case matches[0].Start < matches[2].End+q.MinLength: + // The first and third matches don't overlap, but there's no room for + // another match between them. Emit the first match and discard the second. + e.emit(matches[2]) + matches = [3]absoluteMatch{ + matches[0], + absoluteMatch{}, + absoluteMatch{}, + } + + default: + // Emit the first match, shortening it if necessary to avoid overlap with the second. + if matches[2].End > matches[1].Start { + matches[2].End = matches[1].Start + } + if matches[2].End-matches[2].Start >= q.MinLength && q.score(matches[2]) > 0 { + e.emit(matches[2]) + } + matches[2] = absoluteMatch{} + } + } + + // We've found all the matches now; emit the remaining ones. 
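+    // matches[0] is the most recent match, so matches[1] is emitted first
+    // to keep the output in stream order.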
+ if matches[1] != (absoluteMatch{}) { + if matches[1].End > matches[0].Start { + matches[1].End = matches[0].Start + } + if matches[1].End-matches[1].Start >= q.MinLength && q.score(matches[1]) > 0 { + e.emit(matches[1]) + } + } + if matches[0] != (absoluteMatch{}) { + e.emit(matches[0]) + } + + dst = e.Dst + if e.NextEmit < len(src) { + dst = append(dst, Match{ + Unmatched: len(src) - e.NextEmit, + }) + } + + return dst +} + +const hashMul64 = 0x1E35A7BD1E35A7BD + +// extendMatch returns the largest k such that k <= len(src) and that +// src[i:i+k-j] and src[j:k] have the same contents. +// +// It assumes that: +// +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + switch runtime.GOARCH { + case "amd64": + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + for j+8 < len(src) { + iBytes := binary.LittleEndian.Uint64(src[i:]) + jBytes := binary.LittleEndian.Uint64(src[j:]) + if iBytes != jBytes { + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + return j + bits.TrailingZeros64(iBytes^jBytes)>>3 + } + i, j = i+8, j+8 + } + case "386": + // On a 32-bit CPU, we do it 4 bytes at a time. + for j+4 < len(src) { + iBytes := binary.LittleEndian.Uint32(src[i:]) + jBytes := binary.LittleEndian.Uint32(src[j:]) + if iBytes != jBytes { + return j + bits.TrailingZeros32(iBytes^jBytes)>>3 + } + i, j = i+4, j+4 + } + } + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +// Given a 4-byte match at src[start] and src[candidate], extendMatch2 extends it +// upward as far as possible, and downward no farther than to min. +func extendMatch2(src []byte, start, candidate, min int) absoluteMatch { + end := extendMatch(src, candidate+4, start+4) + for start > min && candidate > 0 && src[start-1] == src[candidate-1] { + start-- + candidate-- + } + return absoluteMatch{ + Start: start, + End: end, + Match: candidate, + } +} diff --git a/vendor/github.com/andybalholm/brotli/matchfinder/matchfinder.go b/vendor/github.com/andybalholm/brotli/matchfinder/matchfinder.go new file mode 100644 index 00000000..f6bcfdb3 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/matchfinder/matchfinder.go @@ -0,0 +1,103 @@ +// The matchfinder package defines reusable components for data compression. +// +// Many compression libraries have two main parts: +// - Something that looks for repeated sequences of bytes +// - An encoder for the compressed data format (often an entropy coder) +// +// Although these are logically two separate steps, the implementations are +// usually closely tied together. You can't use flate's matcher with snappy's +// encoder, for example. This package defines interfaces and an intermediate +// representation to allow mixing and matching compression components. +package matchfinder + +import "io" + +// A Match is the basic unit of LZ77 compression. +type Match struct { + Unmatched int // the number of unmatched bytes since the previous match + Length int // the number of bytes in the matched string; it may be 0 at the end of the input + Distance int // how far back in the stream to copy from +} + +// A MatchFinder performs the LZ77 stage of compression, looking for matches. 
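+//
+// For example, pairing a match finder with the TextEncoder from this
+// package via a Writer (both defined elsewhere in this package) prints
+// a readable LZ77 parse (a sketch, not tuned settings):
+//
+//    w := &Writer{
+//        Dest:        os.Stdout,
+//        MatchFinder: &M4{},
+//        Encoder:     TextEncoder{},
+//    }
+//    w.Write(data)
+//    w.Close()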
+type MatchFinder interface { + // FindMatches looks for matches in src, appends them to dst, and returns dst. + FindMatches(dst []Match, src []byte) []Match + + // Reset clears any internal state, preparing the MatchFinder to be used with + // a new stream. + Reset() +} + +// An Encoder encodes the data in its final format. +type Encoder interface { + // Encode appends the encoded format of src to dst, using the match + // information from matches. + Encode(dst []byte, src []byte, matches []Match, lastBlock bool) []byte + + // Reset clears any internal state, preparing the Encoder to be used with + // a new stream. + Reset() +} + +// A Writer uses MatchFinder and Encoder to write compressed data to Dest. +type Writer struct { + Dest io.Writer + MatchFinder MatchFinder + Encoder Encoder + + // BlockSize is the number of bytes to compress at a time. If it is zero, + // each Write operation will be treated as one block. + BlockSize int + + err error + inBuf []byte + outBuf []byte + matches []Match +} + +func (w *Writer) Write(p []byte) (n int, err error) { + if w.err != nil { + return 0, w.err + } + + if w.BlockSize == 0 { + return w.writeBlock(p, false) + } + + w.inBuf = append(w.inBuf, p...) + var pos int + for pos = 0; pos+w.BlockSize <= len(w.inBuf) && w.err == nil; pos += w.BlockSize { + w.writeBlock(w.inBuf[pos:pos+w.BlockSize], false) + } + if pos > 0 { + n := copy(w.inBuf, w.inBuf[pos:]) + w.inBuf = w.inBuf[:n] + } + + return len(p), w.err +} + +func (w *Writer) writeBlock(p []byte, lastBlock bool) (n int, err error) { + w.outBuf = w.outBuf[:0] + w.matches = w.MatchFinder.FindMatches(w.matches[:0], p) + w.outBuf = w.Encoder.Encode(w.outBuf, p, w.matches, lastBlock) + _, w.err = w.Dest.Write(w.outBuf) + return len(p), w.err +} + +func (w *Writer) Close() error { + w.writeBlock(w.inBuf, true) + w.inBuf = w.inBuf[:0] + return w.err +} + +func (w *Writer) Reset(newDest io.Writer) { + w.MatchFinder.Reset() + w.Encoder.Reset() + w.err = nil + w.inBuf = w.inBuf[:0] + w.outBuf = w.outBuf[:0] + w.matches = w.matches[:0] + w.Dest = newDest +} diff --git a/vendor/github.com/andybalholm/brotli/matchfinder/textencoder.go b/vendor/github.com/andybalholm/brotli/matchfinder/textencoder.go new file mode 100644 index 00000000..75ecc590 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/matchfinder/textencoder.go @@ -0,0 +1,53 @@ +package matchfinder + +import "fmt" + +// A TextEncoder is an Encoder that produces a human-readable representation of +// the LZ77 compression. Matches are replaced with symbols. +type TextEncoder struct{} + +func (t TextEncoder) Reset() {} + +func (t TextEncoder) Encode(dst []byte, src []byte, matches []Match, lastBlock bool) []byte { + pos := 0 + for _, m := range matches { + if m.Unmatched > 0 { + dst = append(dst, src[pos:pos+m.Unmatched]...) + pos += m.Unmatched + } + if m.Length > 0 { + dst = append(dst, []byte(fmt.Sprintf("<%d,%d>", m.Length, m.Distance))...) + pos += m.Length + } + } + if pos < len(src) { + dst = append(dst, src[pos:]...) + } + return dst +} + +// A NoMatchFinder implements MatchFinder, but doesn't find any matches. +// It can be used to implement the equivalent of the standard library flate package's +// HuffmanOnly setting. 
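+//
+// For example (illustrative), combining it with the Encoder from the
+// parent brotli package gives entropy-coding-only compression:
+//
+//    w := &matchfinder.Writer{
+//        Dest:        dst,
+//        MatchFinder: matchfinder.NoMatchFinder{},
+//        Encoder:     &brotli.Encoder{},
+//    }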
+type NoMatchFinder struct{} + +func (n NoMatchFinder) Reset() {} + +func (n NoMatchFinder) FindMatches(dst []Match, src []byte) []Match { + return append(dst, Match{ + Unmatched: len(src), + }) +} + +// AutoReset wraps a MatchFinder that can return references to data in previous +// blocks, and calls Reset before each block. It is useful for (e.g.) using a +// snappy Encoder with a MatchFinder designed for flate. (Snappy doesn't +// support references between blocks.) +type AutoReset struct { + MatchFinder +} + +func (a AutoReset) FindMatches(dst []Match, src []byte) []Match { + a.Reset() + return a.MatchFinder.FindMatches(dst, src) +} diff --git a/vendor/github.com/andybalholm/brotli/reader.go b/vendor/github.com/andybalholm/brotli/reader.go index cdc67645..9419c79c 100644 --- a/vendor/github.com/andybalholm/brotli/reader.go +++ b/vendor/github.com/andybalholm/brotli/reader.go @@ -27,10 +27,16 @@ func NewReader(src io.Reader) *Reader { } // Reset discards the Reader's state and makes it equivalent to the result of -// its original state from NewReader, but writing to src instead. +// its original state from NewReader, but reading from src instead. // This permits reusing a Reader rather than allocating a new one. // Error is always nil func (r *Reader) Reset(src io.Reader) error { + if r.error_code < 0 { + // There was an unrecoverable error, leaving the Reader's state + // undefined. Clear out everything but the buffer. + *r = Reader{buf: r.buf} + } + decoderStateInit(r) r.src = src if r.buf == nil { diff --git a/vendor/github.com/andybalholm/brotli/state.go b/vendor/github.com/andybalholm/brotli/state.go index d03348fe..38d753eb 100644 --- a/vendor/github.com/andybalholm/brotli/state.go +++ b/vendor/github.com/andybalholm/brotli/state.go @@ -200,7 +200,6 @@ func decoderStateInit(s *Reader) bool { s.block_type_trees = nil s.block_len_trees = nil - s.ringbuffer = nil s.ringbuffer_size = 0 s.new_ringbuffer_size = 0 s.ringbuffer_mask = 0 diff --git a/vendor/github.com/andybalholm/brotli/writer.go b/vendor/github.com/andybalholm/brotli/writer.go index 39feaef5..8a688117 100644 --- a/vendor/github.com/andybalholm/brotli/writer.go +++ b/vendor/github.com/andybalholm/brotli/writer.go @@ -3,6 +3,8 @@ package brotli import ( "errors" "io" + + "github.com/andybalholm/brotli/matchfinder" ) const ( @@ -117,3 +119,44 @@ type nopCloser struct { } func (nopCloser) Close() error { return nil } + +// NewWriterV2 is like NewWriterLevel, but it uses the new implementation +// based on the matchfinder package. It currently supports up to level 7; +// if a higher level is specified, level 7 will be used. 
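+//
+// A typical use, mirroring the other constructors (illustrative):
+//
+//    w := brotli.NewWriterV2(dst, 5)
+//    io.Copy(w, src)
+//    w.Close()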
+func NewWriterV2(dst io.Writer, level int) *matchfinder.Writer { + var mf matchfinder.MatchFinder + if level < 2 { + mf = matchfinder.M0{Lazy: level == 1} + } else { + hashLen := 6 + if level >= 6 { + hashLen = 5 + } + chainLen := 64 + switch level { + case 2: + chainLen = 0 + case 3: + chainLen = 1 + case 4: + chainLen = 2 + case 5: + chainLen = 4 + case 6: + chainLen = 8 + } + mf = &matchfinder.M4{ + MaxDistance: 1 << 20, + ChainLength: chainLen, + HashLen: hashLen, + DistanceBitCost: 57, + } + } + + return &matchfinder.Writer{ + Dest: dst, + MatchFinder: mf, + Encoder: &Encoder{}, + BlockSize: 1 << 16, + } +} diff --git a/vendor/github.com/bodgit/plumbing/.golangci.yaml b/vendor/github.com/bodgit/plumbing/.golangci.yaml index 3d57e3e7..94477c85 100644 --- a/vendor/github.com/bodgit/plumbing/.golangci.yaml +++ b/vendor/github.com/bodgit/plumbing/.golangci.yaml @@ -2,6 +2,7 @@ linters: enable-all: true disable: + - dupword - exhaustivestruct - exhaustruct - nonamedreturns diff --git a/vendor/github.com/bodgit/plumbing/.goreleaser.yml b/vendor/github.com/bodgit/plumbing/.goreleaser.yml index 8c37d5dc..75e2a1f7 100644 --- a/vendor/github.com/bodgit/plumbing/.goreleaser.yml +++ b/vendor/github.com/bodgit/plumbing/.goreleaser.yml @@ -3,3 +3,5 @@ builds: - skip: true release: prerelease: auto +changelog: + use: github-native diff --git a/vendor/github.com/bodgit/plumbing/README.md b/vendor/github.com/bodgit/plumbing/README.md index b8df82df..79d6ec6a 100644 --- a/vendor/github.com/bodgit/plumbing/README.md +++ b/vendor/github.com/bodgit/plumbing/README.md @@ -2,8 +2,8 @@ [![Coverage Status](https://coveralls.io/repos/github/bodgit/plumbing/badge.svg?branch=master)](https://coveralls.io/github/bodgit/plumbing?branch=master) [![Go Report Card](https://goreportcard.com/badge/github.com/bodgit/plumbing)](https://goreportcard.com/report/github.com/bodgit/plumbing) [![GoDoc](https://godoc.org/github.com/bodgit/plumbing?status.svg)](https://godoc.org/github.com/bodgit/plumbing) +![Go version](https://img.shields.io/badge/Go-1.19-brightgreen.svg) ![Go version](https://img.shields.io/badge/Go-1.18-brightgreen.svg) -![Go version](https://img.shields.io/badge/Go-1.17-brightgreen.svg) plumbing ======== diff --git a/vendor/github.com/bodgit/plumbing/fill.go b/vendor/github.com/bodgit/plumbing/fill.go new file mode 100644 index 00000000..353d6a60 --- /dev/null +++ b/vendor/github.com/bodgit/plumbing/fill.go @@ -0,0 +1,21 @@ +package plumbing + +import "io" + +type fillReader struct { + b byte +} + +func (r *fillReader) Read(p []byte) (int, error) { + for i := range p { + p[i] = r.b + } + + return len(p), nil +} + +// FillReader returns an io.Reader such that Read calls return an unlimited +// stream of b bytes. +func FillReader(b byte) io.Reader { + return &fillReader{b} +} diff --git a/vendor/github.com/bodgit/plumbing/padded.go b/vendor/github.com/bodgit/plumbing/padded.go index cb7f0274..675c85d6 100644 --- a/vendor/github.com/bodgit/plumbing/padded.go +++ b/vendor/github.com/bodgit/plumbing/padded.go @@ -1,7 +1,6 @@ package plumbing import ( - "bytes" "io" ) @@ -9,6 +8,5 @@ import ( // fewer than n bytes are available from r then any remaining bytes return // fill instead. 
func PaddedReader(r io.Reader, n int64, fill byte) io.Reader { - // Naive, but works - return io.LimitReader(io.MultiReader(r, bytes.NewBuffer(bytes.Repeat([]byte{fill}, int(n)))), n) + return io.LimitReader(io.MultiReader(r, FillReader(fill)), n) } diff --git a/vendor/github.com/bodgit/plumbing/zero.go b/vendor/github.com/bodgit/plumbing/zero.go new file mode 100644 index 00000000..943a35d1 --- /dev/null +++ b/vendor/github.com/bodgit/plumbing/zero.go @@ -0,0 +1,18 @@ +package plumbing + +import "io" + +type devZero struct { + io.Reader +} + +func (w *devZero) Write(p []byte) (int, error) { + return len(p), nil +} + +// DevZero returns an io.ReadWriter that behaves like /dev/zero such that Read +// calls return an unlimited stream of zero bytes and all Write calls succeed +// without doing anything. +func DevZero() io.ReadWriter { + return &devZero{FillReader(0)} +} diff --git a/vendor/github.com/bodgit/sevenzip/.golangci.yaml b/vendor/github.com/bodgit/sevenzip/.golangci.yaml index f74dd154..ee8fbd00 100644 --- a/vendor/github.com/bodgit/sevenzip/.golangci.yaml +++ b/vendor/github.com/bodgit/sevenzip/.golangci.yaml @@ -1,13 +1,101 @@ --- linters: - enable-all: true - disable: - - exhaustivestruct - - exhaustruct - - godox - - goerr113 - - gomnd - - ireturn - - nonamedreturns - - varnamelen - - wrapcheck + disable-all: true + enable: + - asasalint + - asciicheck + - bidichk + - bodyclose + - canonicalheader + - containedctx + - contextcheck + - copyloopvar + - cyclop + - decorder + - dogsled + - dupl + - dupword + - durationcheck + - errcheck + - errchkjson + - errname + - errorlint + - exhaustive + - fatcontext + - forbidigo + - forcetypeassert + - funlen + - gci + - ginkgolinter + - gocheckcompilerdirectives + - gochecknoglobals + - gochecknoinits + - gochecksumtype + - gocognit + - goconst + - gocritic + - gocyclo + - godot + - gofmt + - gofumpt + - goheader + - goimports + - gomoddirectives + - gomodguard + - goprintffuncname + - gosec + - gosimple + - gosmopolitan + - govet + - grouper + - importas + - inamedparam + - ineffassign + - interfacebloat + - intrange + - lll + - loggercheck + - maintidx + - makezero + - mirror + - misspell + - musttag + - nakedret + - nestif + - nilerr + - nilnil + - nlreturn + - noctx + - nolintlint + - nosprintfhostport + - paralleltest + - perfsprint + - prealloc + - predeclared + - promlinter + - protogetter + - reassign + - revive + - rowserrcheck + - sloglint + - spancheck + - sqlclosecheck + - staticcheck + - stylecheck + - tagalign + - tagliatelle + - tenv + - testableexamples + - testifylint + - testpackage + - thelper + - tparallel + - typecheck + - unconvert + - unparam + - unused + - usestdlibvars + - wastedassign + - whitespace + - wsl + - zerologlint diff --git a/vendor/github.com/bodgit/sevenzip/.pre-commit-config.yaml b/vendor/github.com/bodgit/sevenzip/.pre-commit-config.yaml new file mode 100644 index 00000000..ee68defa --- /dev/null +++ b/vendor/github.com/bodgit/sevenzip/.pre-commit-config.yaml @@ -0,0 +1,18 @@ +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: end-of-file-fixer + - id: trailing-whitespace + - repo: https://github.com/commitizen-tools/commitizen + rev: v3.5.3 + hooks: + - id: commitizen + - repo: https://github.com/golangci/golangci-lint + rev: v1.60.3 + hooks: + - id: golangci-lint + - repo: https://github.com/gitleaks/gitleaks + rev: v8.18.0 + hooks: + - id: gitleaks diff --git a/vendor/github.com/bodgit/sevenzip/.release-please-manifest.json 
b/vendor/github.com/bodgit/sevenzip/.release-please-manifest.json new file mode 100644 index 00000000..453ca4c5 --- /dev/null +++ b/vendor/github.com/bodgit/sevenzip/.release-please-manifest.json @@ -0,0 +1,3 @@ +{ + ".": "1.5.2" +} diff --git a/vendor/github.com/bodgit/sevenzip/CHANGELOG.md b/vendor/github.com/bodgit/sevenzip/CHANGELOG.md new file mode 100644 index 00000000..40b2525f --- /dev/null +++ b/vendor/github.com/bodgit/sevenzip/CHANGELOG.md @@ -0,0 +1,39 @@ +# Changelog + +## [1.5.2](https://github.com/bodgit/sevenzip/compare/v1.5.1...v1.5.2) (2024-08-29) + + +### Bug Fixes + +* Avoid panic in Reader init (empty2.7z); header.filesInfo is nil. ([#252](https://github.com/bodgit/sevenzip/issues/252)) ([10d7550](https://github.com/bodgit/sevenzip/commit/10d75506fa01719e9e0f074c4e7b3c3b96f4233d)) +* Lint fixes ([#253](https://github.com/bodgit/sevenzip/issues/253)) ([c82d2e9](https://github.com/bodgit/sevenzip/commit/c82d2e90e52ae81797b0f790fabe90baf35bf581)) + +## [1.5.1](https://github.com/bodgit/sevenzip/compare/v1.5.0...v1.5.1) (2024-04-05) + + +### Performance Improvements + +* Add AES key caching ([#189](https://github.com/bodgit/sevenzip/issues/189)) ([3d794c2](https://github.com/bodgit/sevenzip/commit/3d794c26c683fe80def4496d49106679b868ae2e)) +* Don't use pools for streams with one file ([#194](https://github.com/bodgit/sevenzip/issues/194)) ([b4cfdcf](https://github.com/bodgit/sevenzip/commit/b4cfdcfe0a64380d64c112d41a870dc8c33c1274)) + +## [1.5.0](https://github.com/bodgit/sevenzip/compare/v1.4.5...v1.5.0) (2024-02-08) + + +### Features + +* Export the folder/stream identifier ([#169](https://github.com/bodgit/sevenzip/issues/169)) ([187a49e](https://github.com/bodgit/sevenzip/commit/187a49e243ec0618b527851fcee0503d8436e7c2)) + +## [1.4.5](https://github.com/bodgit/sevenzip/compare/v1.4.4...v1.4.5) (2023-12-12) + + +### Bug Fixes + +* Handle lack of CRC digests ([#143](https://github.com/bodgit/sevenzip/issues/143)) ([4ead944](https://github.com/bodgit/sevenzip/commit/4ead944ad71398931b70a09ea40ba9ce742f4bf7)) +* Handle small reads in branch converters ([#144](https://github.com/bodgit/sevenzip/issues/144)) ([dfaf538](https://github.com/bodgit/sevenzip/commit/dfaf538402be45e6cd12064b3d49e7496d2b22f4)) + +## [1.4.4](https://github.com/bodgit/sevenzip/compare/v1.4.3...v1.4.4) (2023-11-06) + + +### Bug Fixes + +* Handle panic when unpack info is missing ([#117](https://github.com/bodgit/sevenzip/issues/117)) ([db3ba77](https://github.com/bodgit/sevenzip/commit/db3ba775286aa4efce8fdd1c398bf2bd4dfba37d)) diff --git a/vendor/github.com/bodgit/sevenzip/LICENSE b/vendor/github.com/bodgit/sevenzip/LICENSE index 08172a91..5a19a8fc 100644 --- a/vendor/github.com/bodgit/sevenzip/LICENSE +++ b/vendor/github.com/bodgit/sevenzip/LICENSE @@ -27,4 +27,3 @@ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-
diff --git a/vendor/github.com/bodgit/sevenzip/README.md b/vendor/github.com/bodgit/sevenzip/README.md
index d8de0cdf..673c14a9 100644
--- a/vendor/github.com/bodgit/sevenzip/README.md
+++ b/vendor/github.com/bodgit/sevenzip/README.md
@@ -1,13 +1,14 @@
+[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/bodgit/sevenzip/badge)](https://securityscorecards.dev/viewer/?uri=github.com/bodgit/sevenzip)
+[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/6882/badge)](https://www.bestpractices.dev/projects/6882)
 [![GitHub release](https://img.shields.io/github/v/release/bodgit/sevenzip)](https://github.com/bodgit/sevenzip/releases)
-[![Build Status](https://img.shields.io/github/workflow/status/bodgit/sevenzip/build)](https://github.com/bodgit/sevenzip/actions?query=workflow%3Abuild)
+[![Build Status](https://img.shields.io/github/actions/workflow/status/bodgit/sevenzip/build.yml?branch=main)](https://github.com/bodgit/sevenzip/actions?query=workflow%3ABuild)
 [![Coverage Status](https://coveralls.io/repos/github/bodgit/sevenzip/badge.svg?branch=master)](https://coveralls.io/github/bodgit/sevenzip?branch=master)
 [![Go Report Card](https://goreportcard.com/badge/github.com/bodgit/sevenzip)](https://goreportcard.com/report/github.com/bodgit/sevenzip)
 [![GoDoc](https://godoc.org/github.com/bodgit/sevenzip?status.svg)](https://godoc.org/github.com/bodgit/sevenzip)
-![Go version](https://img.shields.io/badge/Go-1.18-brightgreen.svg)
-![Go version](https://img.shields.io/badge/Go-1.17-brightgreen.svg)
+![Go version](https://img.shields.io/badge/Go-1.22-brightgreen.svg)
+![Go version](https://img.shields.io/badge/Go-1.21-brightgreen.svg)

-sevenzip
-========
+# sevenzip

 A reader for 7-zip archives inspired by `archive/zip`.

@@ -18,8 +19,103 @@ Current status:

 * Handles compressed headers, (`7za a -mhc=on test.7z ...`).
 * Handles password-protected versions of both of the above (`7za a -mhc=on|off -mhe=on -ppassword test.7z ...`).
 * Handles archives split into multiple volumes, (`7za a -v100m test.7z ...`).
+* Handles self-extracting archives, (`7za a -sfx archive.exe ...`).
 * Validates CRC values as it parses the file.
-* Supports BCJ2, Brotli, Bzip2, Copy, Deflate, Delta, LZ4, LZMA, LZMA2 and Zstandard methods.
+* Supports ARM, BCJ, BCJ2, Brotli, Bzip2, Copy, Deflate, Delta, LZ4, LZMA, LZMA2, PPC, SPARC and Zstandard methods.
 * Implements the `fs.FS` interface so you can treat an opened 7-zip archive like a filesystem.

 More examples of 7-zip archives are needed to test all of the different combinations/algorithms possible.
+
+## Frequently Asked Questions
+
+### Why is my code running so slow?
+
+Someone might write the following simple code:
+```golang
+func extractArchive(archive string) error {
+	r, err := sevenzip.OpenReader(archive)
+	if err != nil {
+		return err
+	}
+	defer r.Close()
+
+	for _, f := range r.File {
+		rc, err := f.Open()
+		if err != nil {
+			return err
+		}
+		defer rc.Close()
+
+		// Extract the file
+	}
+
+	return nil
+}
+```
+Unlike a zip archive, where every file is individually compressed, 7-zip archives can have all of the files compressed together in one long compressed stream, supposedly to achieve a better compression ratio.
+In a naive random-access implementation, to read the first file you start at the beginning of the compressed stream and read out that file's worth of bytes.
+To read the second file you have to start at the beginning of the compressed stream again, read and discard the first file's worth of bytes to get to the correct offset in the stream, then read out the second file's worth of bytes.
+You can see that for an archive containing hundreds of files, extraction gets progressively slower, as you have to read and discard more and more data just to get to the right offset in the stream.
+
+This package contains an optimisation that caches and reuses the underlying compressed stream reader so you don't have to keep starting from the beginning for each file, but it does require you to call `rc.Close()` before extracting the next file.
+So write your code similar to this:
+```golang
+func extractFile(file *sevenzip.File) error {
+	rc, err := file.Open()
+	if err != nil {
+		return err
+	}
+	defer rc.Close()
+
+	// Extract the file
+
+	return nil
+}
+
+func extractArchive(archive string) error {
+	r, err := sevenzip.OpenReader(archive)
+	if err != nil {
+		return err
+	}
+	defer r.Close()
+
+	for _, f := range r.File {
+		if err = extractFile(f); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+```
+The main difference is that the `Close()` calls are no longer all deferred until the end of `extractArchive()`.
+
+There is a set of benchmarks in this package that demonstrates the performance boost that the optimisation provides, amongst other techniques:
+```
+$ go test -v -run='^$' -bench='Reader$' -benchtime=60s
+goos: darwin
+goarch: amd64
+pkg: github.com/bodgit/sevenzip
+cpu: Intel(R) Core(TM) i9-8950HK CPU @ 2.90GHz
+BenchmarkNaiveReader
+BenchmarkNaiveReader-12                        2        31077542628 ns/op
+BenchmarkOptimisedReader
+BenchmarkOptimisedReader-12                  434          164854747 ns/op
+BenchmarkNaiveParallelReader
+BenchmarkNaiveParallelReader-12              240          361869339 ns/op
+BenchmarkNaiveSingleParallelReader
+BenchmarkNaiveSingleParallelReader-12        412          171027895 ns/op
+BenchmarkParallelReader
+BenchmarkParallelReader-12                   636          112551812 ns/op
+PASS
+ok      github.com/bodgit/sevenzip      472.251s
+```
+The archive used here is just the reference LZMA SDK archive, which is only 1 MiB in size but contains 630+ files split across three compression streams.
+The only difference between BenchmarkNaiveReader and the rest is the lack of a call to `rc.Close()` between files, so the stream-reuse optimisation doesn't take effect.
+
+Don't blindly throw goroutines at the problem either, as this can also undo the optimisation; a naive implementation that uses a pool of multiple goroutines to extract each file ends up being nearly 50% slower, and even a pool of just one goroutine can be less efficient.
+The optimal way to employ goroutines is to make use of the `sevenzip.FileHeader.Stream` field: extract files with the same value using the same goroutine (see the sketch below).
+This achieves a 50% speed improvement with the LZMA SDK archive, but it very much depends on how many streams there are in the archive.
+
+In general, don't extract the files in a different order from their natural order within the archive, as that will also undo the optimisation.
+The worst scenario would likely be to extract the archive in reverse order.
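The stream-grouped goroutine approach described in the FAQ above can be sketched as follows. This is an illustration, not code from the patch: it reuses the `extractFile` helper from the README example, omits imports in the same style (it additionally needs `sync`), and the `extractArchiveByStream` name is hypothetical. Files are bucketed by the exported `Stream` identifier introduced in this release, and each bucket is extracted, in natural order, by its own goroutine:

```golang
func extractArchiveByStream(archive string) error {
	r, err := sevenzip.OpenReader(archive)
	if err != nil {
		return err
	}
	defer r.Close()

	// r.File is in archive order, so each bucket keeps its natural order.
	streams := make(map[int][]*sevenzip.File)
	for _, f := range r.File {
		streams[f.Stream] = append(streams[f.Stream], f)
	}

	// Each goroutine reports at most one error.
	errs := make(chan error, len(streams))

	var wg sync.WaitGroup

	for _, files := range streams {
		wg.Add(1)

		go func(files []*sevenzip.File) {
			defer wg.Done()

			for _, f := range files {
				if err := extractFile(f); err != nil {
					errs <- err

					return
				}
			}
		}(files)
	}

	wg.Wait()
	close(errs)

	return <-errs // nil if no goroutine failed
}
```
Because no two goroutines share a compressed stream, the cached stream readers never have to rewind, which is what keeps this particular split compatible with the optimisation.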
diff --git a/vendor/github.com/bodgit/sevenzip/internal/aes7z/key.go b/vendor/github.com/bodgit/sevenzip/internal/aes7z/key.go index 2ff37e77..79c78ddc 100644 --- a/vendor/github.com/bodgit/sevenzip/internal/aes7z/key.go +++ b/vendor/github.com/bodgit/sevenzip/internal/aes7z/key.go @@ -4,12 +4,47 @@ import ( "bytes" "crypto/sha256" "encoding/binary" + "encoding/hex" + lru "github.com/hashicorp/golang-lru/v2" + "go4.org/syncutil" "golang.org/x/text/encoding/unicode" "golang.org/x/text/transform" ) -func calculateKey(password string, cycles int, salt []byte) []byte { +type cacheKey struct { + password string + cycles int + salt string // []byte isn't comparable +} + +const cacheSize = 10 + +//nolint:gochecknoglobals +var ( + once syncutil.Once + cache *lru.Cache[cacheKey, []byte] +) + +func calculateKey(password string, cycles int, salt []byte) ([]byte, error) { + if err := once.Do(func() (err error) { + cache, err = lru.New[cacheKey, []byte](cacheSize) + + return + }); err != nil { + return nil, err + } + + ck := cacheKey{ + password: password, + cycles: cycles, + salt: hex.EncodeToString(salt), + } + + if key, ok := cache.Get(ck); ok { + return key, nil + } + b := bytes.NewBuffer(salt) // Convert password to UTF-16LE @@ -27,8 +62,11 @@ func calculateKey(password string, cycles int, salt []byte) []byte { _, _ = h.Write(b.Bytes()) _ = binary.Write(h, binary.LittleEndian, i) } + copy(key, h.Sum(nil)) } - return key + _ = cache.Add(ck, key) + + return key, nil } diff --git a/vendor/github.com/bodgit/sevenzip/internal/aes7z/reader.go b/vendor/github.com/bodgit/sevenzip/internal/aes7z/reader.go index 025de2bc..760db29d 100644 --- a/vendor/github.com/bodgit/sevenzip/internal/aes7z/reader.go +++ b/vendor/github.com/bodgit/sevenzip/internal/aes7z/reader.go @@ -1,40 +1,45 @@ package aes7z import ( + "bytes" "crypto/aes" "crypto/cipher" "errors" "io" - - "github.com/connesc/cipherio" ) var errProperties = errors.New("aes7z: not enough properties") type readCloser struct { rc io.ReadCloser - br io.Reader salt, iv []byte cycles int + cbc cipher.BlockMode + buf bytes.Buffer } func (rc *readCloser) Close() error { var err error if rc.rc != nil { err = rc.rc.Close() - rc.rc, rc.br = nil, nil + rc.rc = nil } return err } func (rc *readCloser) Password(p string) error { - block, err := aes.NewCipher(calculateKey(p, rc.cycles, rc.salt)) + key, err := calculateKey(p, rc.cycles, rc.salt) + if err != nil { + return err + } + + block, err := aes.NewCipher(key) if err != nil { return err } - rc.br = cipherio.NewBlockReader(rc.rc, cipher.NewCBCDecrypter(block, rc.iv)) + rc.cbc = cipher.NewCBCDecrypter(block, rc.iv) return nil } @@ -44,11 +49,27 @@ func (rc *readCloser) Read(p []byte) (int, error) { return 0, errors.New("aes7z: Read after Close") } - if rc.br == nil { + if rc.cbc == nil { return 0, errors.New("aes7z: no password set") } - return rc.br.Read(p) + var block [aes.BlockSize]byte + + for rc.buf.Len() < len(p) { + if _, err := io.ReadFull(rc.rc, block[:]); err != nil { + if errors.Is(err, io.EOF) { + break + } + + return 0, err + } + + rc.cbc.CryptBlocks(block[:], block[:]) + + _, _ = rc.buf.Write(block[:]) + } + + return rc.buf.Read(p) } // NewReader returns a new AES-256-CBC & SHA-256 io.ReadCloser. 
The Password @@ -78,7 +99,7 @@ func NewReader(p []byte, _ uint64, readers []io.ReadCloser) (io.ReadCloser, erro } rc.salt = p[2 : 2+salt] - rc.iv = make([]byte, 16) + rc.iv = make([]byte, aes.BlockSize) copy(rc.iv, p[2+salt:]) rc.cycles = int(p[0] & 0x3f) diff --git a/vendor/github.com/bodgit/sevenzip/internal/bcj2/reader.go b/vendor/github.com/bodgit/sevenzip/internal/bcj2/reader.go index 5e0bb0a7..343ec5f7 100644 --- a/vendor/github.com/bodgit/sevenzip/internal/bcj2/reader.go +++ b/vendor/github.com/bodgit/sevenzip/internal/bcj2/reader.go @@ -49,7 +49,7 @@ type readCloser struct { sd [256 + 2]uint previous byte - written uint64 + written uint32 buf *bytes.Buffer } @@ -189,7 +189,7 @@ func (rc *readCloser) read() error { return err } - dest -= uint32(rc.written + 4) + dest -= rc.written + 4 _ = binary.Write(rc.buf, binary.LittleEndian, dest) rc.previous = byte(dest >> 24) diff --git a/vendor/github.com/bodgit/sevenzip/internal/bra/arm.go b/vendor/github.com/bodgit/sevenzip/internal/bra/arm.go new file mode 100644 index 00000000..3916a0c2 --- /dev/null +++ b/vendor/github.com/bodgit/sevenzip/internal/bra/arm.go @@ -0,0 +1,55 @@ +package bra + +import ( + "encoding/binary" + "io" +) + +const armAlignment = 4 + +type arm struct { + ip uint32 +} + +func (c *arm) Size() int { return armAlignment } + +func (c *arm) Convert(b []byte, encoding bool) int { + if len(b) < c.Size() { + return 0 + } + + if c.ip == 0 { + c.ip += armAlignment + } + + var i int + + for i = 0; i < len(b) & ^(armAlignment-1); i += armAlignment { + v := binary.LittleEndian.Uint32(b[i:]) + + c.ip += uint32(armAlignment) + + if b[i+3] == 0xeb { + v <<= 2 + + if encoding { + v += c.ip + } else { + v -= c.ip + } + + v >>= 2 + v &= 0x00ffffff + v |= 0xeb000000 + } + + binary.LittleEndian.PutUint32(b[i:], v) + } + + return i +} + +// NewARMReader returns a new ARM io.ReadCloser. 
+func NewARMReader(_ []byte, _ uint64, readers []io.ReadCloser) (io.ReadCloser, error) { + return newReader(readers, new(arm)) +} diff --git a/vendor/github.com/bodgit/sevenzip/internal/bra/bcj.go b/vendor/github.com/bodgit/sevenzip/internal/bra/bcj.go new file mode 100644 index 00000000..05f1fdff --- /dev/null +++ b/vendor/github.com/bodgit/sevenzip/internal/bra/bcj.go @@ -0,0 +1,104 @@ +package bra + +import ( + "encoding/binary" + "io" +) + +const bcjLookAhead = 4 + +type bcj struct { + ip, state uint32 +} + +func (c *bcj) Size() int { return bcjLookAhead + 1 } + +func test86MSByte(b byte) bool { + return (b+1)&0xfe == 0 +} + +//nolint:cyclop,funlen,gocognit +func (c *bcj) Convert(b []byte, encoding bool) int { + if len(b) < c.Size() { + return 0 + } + + var ( + pos uint32 + mask = c.state & 7 + ) + + for { + p := pos + for ; int(p) < len(b)-bcjLookAhead; p++ { + if b[p]&0xfe == 0xe8 { + break + } + } + + d := p - pos + pos = p + + if int(p) >= len(b)-bcjLookAhead { + if d > 2 { + c.state = 0 + } else { + c.state = mask >> d + } + + c.ip += pos + + return int(pos) + } + + if d > 2 { + mask = 0 + } else { + mask >>= d + if mask != 0 && (mask > 4 || mask == 3 || test86MSByte(b[p+(mask>>1)+1])) { + mask = (mask >> 1) | 4 + pos++ + + continue + } + } + + //nolint:nestif + if test86MSByte(b[p+4]) { + v := binary.LittleEndian.Uint32(b[p+1:]) + cur := c.ip + uint32(c.Size()) + pos //nolint:gosec + pos += uint32(c.Size()) //nolint:gosec + + if encoding { + v += cur + } else { + v -= cur + } + + if mask != 0 { + sh := mask & 6 << 2 + if test86MSByte(byte(v >> sh)) { + v ^= (uint32(0x100) << sh) - 1 + if encoding { + v += cur + } else { + v -= cur + } + } + + mask = 0 + } + + binary.LittleEndian.PutUint32(b[p+1:], v) + b[p+4] = 0 - b[p+4]&1 + } else { + mask = (mask >> 1) | 4 + pos++ + } + } +} + +// NewBCJReader returns a new BCJ io.ReadCloser. 
+func NewBCJReader(_ []byte, _ uint64, readers []io.ReadCloser) (io.ReadCloser, error) {
+	return newReader(readers, new(bcj))
+}
diff --git a/vendor/github.com/bodgit/sevenzip/internal/bra/bra.go b/vendor/github.com/bodgit/sevenzip/internal/bra/bra.go
new file mode 100644
index 00000000..a2f0daa7
--- /dev/null
+++ b/vendor/github.com/bodgit/sevenzip/internal/bra/bra.go
@@ -0,0 +1,6 @@
+package bra
+
+type converter interface {
+	Size() int
+	Convert(b []byte, encoding bool) int
+}
diff --git a/vendor/github.com/bodgit/sevenzip/internal/bra/minmax_compat.go b/vendor/github.com/bodgit/sevenzip/internal/bra/minmax_compat.go
new file mode 100644
index 00000000..8004c6a6
--- /dev/null
+++ b/vendor/github.com/bodgit/sevenzip/internal/bra/minmax_compat.go
@@ -0,0 +1,21 @@
+//go:build !go1.21
+
+package bra
+
+//nolint:predeclared
+func min(x, y int) int {
+	if x < y {
+		return x
+	}
+
+	return y
+}
+
+//nolint:predeclared
+func max(x, y int) int {
+	if x > y {
+		return x
+	}
+
+	return y
+}
diff --git a/vendor/github.com/bodgit/sevenzip/internal/bra/ppc.go b/vendor/github.com/bodgit/sevenzip/internal/bra/ppc.go
new file mode 100644
index 00000000..9d38243f
--- /dev/null
+++ b/vendor/github.com/bodgit/sevenzip/internal/bra/ppc.go
@@ -0,0 +1,48 @@
+package bra
+
+import (
+	"encoding/binary"
+	"io"
+)
+
+const ppcAlignment = 4
+
+type ppc struct {
+	ip uint32
+}
+
+func (c *ppc) Size() int { return ppcAlignment }
+
+func (c *ppc) Convert(b []byte, encoding bool) int {
+	if len(b) < c.Size() {
+		return 0
+	}
+
+	var i int
+
+	for i = 0; i < len(b) & ^(ppcAlignment-1); i += ppcAlignment {
+		v := binary.BigEndian.Uint32(b[i:])
+
+		if b[i+0]&0xfc == 0x48 && b[i+3]&3 == 1 {
+			if encoding {
+				v += c.ip
+			} else {
+				v -= c.ip
+			}
+
+			v &= 0x03ffffff
+			v |= 0x48000000
+		}
+
+		c.ip += uint32(ppcAlignment)
+
+		binary.BigEndian.PutUint32(b[i:], v)
+	}
+
+	return i
+}
+
+// NewPPCReader returns a new PPC io.ReadCloser.
+func NewPPCReader(_ []byte, _ uint64, readers []io.ReadCloser) (io.ReadCloser, error) { + return newReader(readers, new(ppc)) +} diff --git a/vendor/github.com/bodgit/sevenzip/internal/bra/reader.go b/vendor/github.com/bodgit/sevenzip/internal/bra/reader.go new file mode 100644 index 00000000..42edf152 --- /dev/null +++ b/vendor/github.com/bodgit/sevenzip/internal/bra/reader.go @@ -0,0 +1,58 @@ +package bra + +import ( + "bytes" + "errors" + "io" +) + +type readCloser struct { + rc io.ReadCloser + buf bytes.Buffer + n int + conv converter +} + +func (rc *readCloser) Close() (err error) { + if rc.rc != nil { + err = rc.rc.Close() + rc.rc = nil + } + + return +} + +func (rc *readCloser) Read(p []byte) (int, error) { + if rc.rc == nil { + return 0, errors.New("bra: Read after Close") + } + + if _, err := io.CopyN(&rc.buf, rc.rc, int64(max(len(p), rc.conv.Size())-rc.buf.Len())); err != nil { + if !errors.Is(err, io.EOF) { + return 0, err + } + + if rc.buf.Len() < rc.conv.Size() { + rc.n = rc.buf.Len() + } + } + + rc.n += rc.conv.Convert(rc.buf.Bytes()[rc.n:], false) + + n, err := rc.buf.Read(p[:min(rc.n, len(p))]) + + rc.n -= n + + return n, err +} + +func newReader(readers []io.ReadCloser, conv converter) (io.ReadCloser, error) { + if len(readers) != 1 { + return nil, errors.New("bra: need exactly one reader") + } + + return &readCloser{ + rc: readers[0], + conv: conv, + }, nil +} diff --git a/vendor/github.com/bodgit/sevenzip/internal/bra/sparc.go b/vendor/github.com/bodgit/sevenzip/internal/bra/sparc.go new file mode 100644 index 00000000..8aa45536 --- /dev/null +++ b/vendor/github.com/bodgit/sevenzip/internal/bra/sparc.go @@ -0,0 +1,53 @@ +package bra + +import ( + "encoding/binary" + "io" +) + +const sparcAlignment = 4 + +type sparc struct { + ip uint32 +} + +func (c *sparc) Size() int { return sparcAlignment } + +func (c *sparc) Convert(b []byte, encoding bool) int { + if len(b) < c.Size() { + return 0 + } + + var i int + + for i = 0; i < len(b) & ^(sparcAlignment-1); i += sparcAlignment { + v := binary.BigEndian.Uint32(b[i:]) + + if (b[i+0] == 0x40 && b[i+1]&0xc0 == 0) || (b[i+0] == 0x7f && b[i+1] >= 0xc0) { + v <<= 2 + + if encoding { + v += c.ip + } else { + v -= c.ip + } + + v &= 0x01ffffff + v -= uint32(1) << 24 + v ^= 0xff000000 + v >>= 2 + v |= 0x40000000 + } + + c.ip += uint32(sparcAlignment) + + binary.BigEndian.PutUint32(b[i:], v) + } + + return i +} + +// NewSPARCReader returns a new SPARC io.ReadCloser. 
+func NewSPARCReader(_ []byte, _ uint64, readers []io.ReadCloser) (io.ReadCloser, error) { + return newReader(readers, new(sparc)) +} diff --git a/vendor/github.com/bodgit/sevenzip/internal/lzma/reader.go b/vendor/github.com/bodgit/sevenzip/internal/lzma/reader.go index 20868465..485d9f6c 100644 --- a/vendor/github.com/bodgit/sevenzip/internal/lzma/reader.go +++ b/vendor/github.com/bodgit/sevenzip/internal/lzma/reader.go @@ -41,7 +41,7 @@ func NewReader(p []byte, s uint64, readers []io.ReadCloser) (io.ReadCloser, erro h := bytes.NewBuffer(p) _ = binary.Write(h, binary.LittleEndian, s) - lr, err := lzma.NewReader(io.MultiReader(h, readers[0])) + lr, err := lzma.NewReader(multiReader(h, readers[0])) if err != nil { return nil, err } @@ -51,3 +51,35 @@ func NewReader(p []byte, s uint64, readers []io.ReadCloser) (io.ReadCloser, erro r: lr, }, nil } + +func multiReader(b *bytes.Buffer, rc io.ReadCloser) io.Reader { + mr := io.MultiReader(b, rc) + + if br, ok := rc.(io.ByteReader); ok { + return &multiByteReader{ + b: b, + br: br, + mr: mr, + } + } + + return mr +} + +type multiByteReader struct { + b *bytes.Buffer + br io.ByteReader + mr io.Reader +} + +func (m *multiByteReader) ReadByte() (byte, error) { + if m.b.Len() > 0 { + return m.b.ReadByte() + } + + return m.br.ReadByte() +} + +func (m *multiByteReader) Read(p []byte) (n int, err error) { + return m.mr.Read(p) +} diff --git a/vendor/github.com/bodgit/sevenzip/internal/pool/pool.go b/vendor/github.com/bodgit/sevenzip/internal/pool/pool.go index 3389fb0e..4cb61cf0 100644 --- a/vendor/github.com/bodgit/sevenzip/internal/pool/pool.go +++ b/vendor/github.com/bodgit/sevenzip/internal/pool/pool.go @@ -11,8 +11,8 @@ import ( // Pooler is the interface implemented by a pool. type Pooler interface { - Get(int64) (util.SizeReadSeekCloser, bool) - Put(int64, util.SizeReadSeekCloser) (bool, error) + Get(offset int64) (util.SizeReadSeekCloser, bool) + Put(offset int64, rc util.SizeReadSeekCloser) (bool, error) } // Constructor is the function prototype used to instantiate a pool. diff --git a/vendor/github.com/bodgit/sevenzip/internal/zstd/reader.go b/vendor/github.com/bodgit/sevenzip/internal/zstd/reader.go index 89f39938..0d68a3cd 100644 --- a/vendor/github.com/bodgit/sevenzip/internal/zstd/reader.go +++ b/vendor/github.com/bodgit/sevenzip/internal/zstd/reader.go @@ -52,6 +52,7 @@ func NewReader(_ []byte, _ uint64, readers []io.ReadCloser) (io.ReadCloser, erro if r, err = zstd.NewReader(readers[0]); err != nil { return nil, err } + runtime.SetFinalizer(r, (*zstd.Decoder).Close) } diff --git a/vendor/github.com/bodgit/sevenzip/reader.go b/vendor/github.com/bodgit/sevenzip/reader.go index b71c7b84..4e3d845e 100644 --- a/vendor/github.com/bodgit/sevenzip/reader.go +++ b/vendor/github.com/bodgit/sevenzip/reader.go @@ -30,9 +30,6 @@ var ( errTooMuch = errors.New("sevenzip: too much data") ) -//nolint:gochecknoglobals -var newPool pool.Constructor = pool.NewPool - // A Reader serves content from a 7-Zip archive. 
type Reader struct { r io.ReaderAt @@ -142,7 +139,7 @@ func (f *File) Open() (io.ReadCloser, error) { return &fileReader{ rc: rc, f: f, - n: int64(f.UncompressedSize), + n: int64(f.UncompressedSize), //nolint:gosec }, nil } @@ -254,41 +251,96 @@ func NewReader(r io.ReaderAt, size int64) (*Reader, error) { func (z *Reader) folderReader(si *streamsInfo, f int) (*folderReadCloser, uint32, error) { // Create a SectionReader covering all of the streams data - return si.FolderReader(io.NewSectionReader(z.r, z.start, z.end), f, z.p) + return si.FolderReader(io.NewSectionReader(z.r, z.start, z.end-z.start), f, z.p) } -//nolint:cyclop,funlen,gocognit +const ( + chunkSize = 4096 + searchLimit = 1 << 20 // 1 MiB +) + +func findSignature(r io.ReaderAt, search []byte) ([]int64, error) { + chunk := make([]byte, chunkSize+len(search)) + offsets := make([]int64, 0, 2) + + for offset := int64(0); offset < searchLimit; offset += chunkSize { + n, err := r.ReadAt(chunk, offset) + + for i := 0; ; { + idx := bytes.Index(chunk[i:n], search) + if idx == -1 { + break + } + + offsets = append(offsets, offset+int64(i+idx)) + if offsets[0] == 0 { + // If signature is at the beginning, return immediately, it's a regular archive + return offsets, nil + } + + i += idx + 1 + } + + if err != nil { + if errors.Is(err, io.EOF) { + break + } + + return nil, err + } + } + + return offsets, nil +} + +//nolint:cyclop,funlen,gocognit,gocyclo func (z *Reader) init(r io.ReaderAt, size int64) error { h := crc32.NewIEEE() tra := plumbing.TeeReaderAt(r, h) - sr := io.NewSectionReader(tra, 0, size) // Will only read first 32 bytes - var sh signatureHeader - if err := binary.Read(sr, binary.LittleEndian, &sh); err != nil { + signature := []byte{'7', 'z', 0xbc, 0xaf, 0x27, 0x1c} + + offsets, err := findSignature(r, signature) + if err != nil { return err } - signature := []byte{'7', 'z', 0xbc, 0xaf, 0x27, 0x1c} - if !bytes.Equal(sh.Signature[:], signature) { + if len(offsets) == 0 { return errFormat } - z.r = r - - h.Reset() - var ( - err error + sr *io.SectionReader + off int64 start startHeader ) - if err = binary.Read(sr, binary.LittleEndian, &start); err != nil { - return err + for _, off = range offsets { + sr = io.NewSectionReader(tra, off, size-off) // Will only read first 32 bytes + + var sh signatureHeader + if err = binary.Read(sr, binary.LittleEndian, &sh); err != nil { + return err + } + + z.r = r + + h.Reset() + + if err = binary.Read(sr, binary.LittleEndian, &start); err != nil { + return err + } + + // CRC of the start header should match + if util.CRC32Equal(h.Sum(nil), sh.CRC) { + break + } + + err = errChecksum } - // CRC of the start header should match - if !util.CRC32Equal(h.Sum(nil), sh.CRC) { - return errChecksum + if err != nil { + return err } // Work out where we are in the file (32, avoiding magic numbers) @@ -297,14 +349,17 @@ func (z *Reader) init(r io.ReaderAt, size int64) error { } // Seek over the streams - if z.end, err = sr.Seek(int64(start.Offset), io.SeekCurrent); err != nil { + if z.end, err = sr.Seek(int64(start.Offset), io.SeekCurrent); err != nil { //nolint:gosec return err } + z.start += off + z.end += off + h.Reset() // Bound bufio.Reader otherwise it can read trailing garbage which screws up the CRC check - br := bufio.NewReader(io.NewSectionReader(tra, z.end, int64(start.Size))) + br := bufio.NewReader(io.NewSectionReader(tra, z.end, int64(start.Size))) //nolint:gosec id, err := br.ReadByte() if err != nil { @@ -364,47 +419,73 @@ func (z *Reader) init(r io.ReaderAt, size int64) error { z.si = 
header.streamsInfo - z.pool = make([]pool.Pooler, z.si.Folders()) - for i := range z.pool { - if z.pool[i], err = newPool(); err != nil { - return err - } - } - // spew.Dump(header) + filesPerStream := make(map[int]int, z.si.Folders()) - folder, offset := 0, int64(0) - z.File = make([]*File, 0, len(header.filesInfo.file)) - j := 0 + if header.filesInfo != nil { + folder, offset := 0, int64(0) + z.File = make([]*File, 0, len(header.filesInfo.file)) + j := 0 - for _, fh := range header.filesInfo.file { - f := new(File) - f.zip = z - f.FileHeader = fh + for _, fh := range header.filesInfo.file { + f := new(File) + f.zip = z + f.FileHeader = fh - if f.FileHeader.FileInfo().IsDir() && !strings.HasSuffix(f.FileHeader.Name, "/") { - f.FileHeader.Name += "/" - } + if f.FileHeader.FileInfo().IsDir() && !strings.HasSuffix(f.FileHeader.Name, "/") { + f.FileHeader.Name += "/" + } + + if !fh.isEmptyStream && !fh.isEmptyFile { + f.folder, _ = header.streamsInfo.FileFolderAndSize(j) - if !fh.isEmptyStream && !fh.isEmptyFile { - f.folder, _ = header.streamsInfo.FileFolderAndSize(j) + // Make an exported copy of the folder index + f.Stream = f.folder - if f.folder != folder { - offset = 0 + filesPerStream[f.folder]++ + + if f.folder != folder { + offset = 0 + } + + f.offset = offset + offset += int64(f.UncompressedSize) //nolint:gosec + folder = f.folder + j++ } - f.offset = offset - offset += int64(f.UncompressedSize) - folder = f.folder - j++ + z.File = append(z.File, f) } + } + + // spew.Dump(filesPerStream) - z.File = append(z.File, f) + z.pool = make([]pool.Pooler, z.si.Folders()) + for i := range z.pool { + var newPool pool.Constructor = pool.NewNoopPool + + if filesPerStream[i] > 1 { + newPool = pool.NewPool + } + + if z.pool[i], err = newPool(); err != nil { + return err + } } return nil } +// Volumes returns the list of volumes that have been opened as part of the current archive. +func (rc *ReadCloser) Volumes() []string { + volumes := make([]string, len(rc.f)) + for idx, f := range rc.f { + volumes[idx] = f.Name() + } + + return volumes +} + // Close closes the 7-zip file or volumes, rendering them unusable for I/O. 
func (rc *ReadCloser) Close() error { var err *multierror.Error @@ -473,7 +554,7 @@ func toValidName(name string) string { return p } -//nolint:cyclop,gocognit +//nolint:cyclop,funlen func (z *Reader) initFileList() { z.fileListOnce.Do(func() { files := make(map[string]int) @@ -512,12 +593,14 @@ func (z *Reader) initFileList() { isDir: isDir, } z.fileList = append(z.fileList, entry) + if isDir { knownDirs[name] = idx } else { files[name] = idx } } + for dir := range dirs { if _, ok := knownDirs[dir]; !ok { if idx, ok := files[dir]; ok { diff --git a/vendor/github.com/bodgit/sevenzip/register.go b/vendor/github.com/bodgit/sevenzip/register.go index bd950b1a..a08a6798 100644 --- a/vendor/github.com/bodgit/sevenzip/register.go +++ b/vendor/github.com/bodgit/sevenzip/register.go @@ -7,6 +7,7 @@ import ( "github.com/bodgit/sevenzip/internal/aes7z" "github.com/bodgit/sevenzip/internal/bcj2" + "github.com/bodgit/sevenzip/internal/bra" "github.com/bodgit/sevenzip/internal/brotli" "github.com/bodgit/sevenzip/internal/bzip2" "github.com/bodgit/sevenzip/internal/deflate" @@ -42,8 +43,16 @@ func init() { RegisterDecompressor([]byte{0x03}, Decompressor(delta.NewReader)) // LZMA RegisterDecompressor([]byte{0x03, 0x01, 0x01}, Decompressor(lzma.NewReader)) + // BCJ + RegisterDecompressor([]byte{0x03, 0x03, 0x01, 0x03}, Decompressor(bra.NewBCJReader)) // BCJ2 RegisterDecompressor([]byte{0x03, 0x03, 0x01, 0x1b}, Decompressor(bcj2.NewReader)) + // PPC + RegisterDecompressor([]byte{0x03, 0x03, 0x02, 0x05}, Decompressor(bra.NewPPCReader)) + // ARM + RegisterDecompressor([]byte{0x03, 0x03, 0x05, 0x01}, Decompressor(bra.NewARMReader)) + // SPARC + RegisterDecompressor([]byte{0x03, 0x03, 0x08, 0x05}, Decompressor(bra.NewSPARCReader)) // Deflate RegisterDecompressor([]byte{0x04, 0x01, 0x08}, Decompressor(deflate.NewReader)) // Bzip2 diff --git a/vendor/github.com/bodgit/sevenzip/release-please-config.json b/vendor/github.com/bodgit/sevenzip/release-please-config.json new file mode 100644 index 00000000..cb967212 --- /dev/null +++ b/vendor/github.com/bodgit/sevenzip/release-please-config.json @@ -0,0 +1,6 @@ +{ + "packages": { + ".": {} + }, + "release-type": "go" +} diff --git a/vendor/github.com/bodgit/sevenzip/struct.go b/vendor/github.com/bodgit/sevenzip/struct.go index d03975b7..f3ce50e5 100644 --- a/vendor/github.com/bodgit/sevenzip/struct.go +++ b/vendor/github.com/bodgit/sevenzip/struct.go @@ -18,7 +18,7 @@ var errAlgorithm = errors.New("sevenzip: unsupported compression algorithm") // CryptoReadCloser adds a Password method to decompressors. 
type CryptoReadCloser interface { - Password(string) error + Password(password string) error } type signatureHeader struct { @@ -39,7 +39,6 @@ type packInfo struct { streams uint64 size []uint64 digest []uint32 - defined []bool } type coder struct { @@ -98,7 +97,7 @@ func (f *folder) coderReader(readers []io.ReadCloser, coder uint64, password str } } - return plumbing.LimitReadCloser(cr, int64(f.size[coder])), nil + return plumbing.LimitReadCloser(cr, int64(f.size[coder])), nil //nolint:gosec } type folderReadCloser struct { @@ -119,9 +118,9 @@ func (rc *folderReadCloser) Seek(offset int64, whence int) (int64, error) { case io.SeekStart: newo = offset case io.SeekCurrent: - newo = int64(rc.wc.Count()) + offset + newo = int64(rc.wc.Count()) + offset //nolint:gosec case io.SeekEnd: - newo = rc.size + offset + newo = rc.Size() + offset default: return 0, errors.New("invalid whence") } @@ -130,15 +129,15 @@ func (rc *folderReadCloser) Seek(offset int64, whence int) (int64, error) { return 0, errors.New("negative seek") } - if newo < int64(rc.wc.Count()) { + if uint64(newo) < rc.wc.Count() { return 0, errors.New("cannot seek backwards") } - if newo > rc.size { + if newo > rc.Size() { return 0, errors.New("cannot seek beyond EOF") } - if _, err := io.CopyN(io.Discard, rc, newo-int64(rc.wc.Count())); err != nil { + if _, err := io.CopyN(io.Discard, rc, newo-int64(rc.wc.Count())); err != nil { //nolint:gosec return 0, err } @@ -174,16 +173,14 @@ func (f *folder) unpackSize() uint64 { } type unpackInfo struct { - folder []*folder - digest []uint32 - defined []bool + folder []*folder + digest []uint32 } type subStreamsInfo struct { streams []uint64 size []uint64 digest []uint32 - defined []bool } type streamsInfo struct { @@ -205,13 +202,15 @@ func (si *streamsInfo) FileFolderAndSize(file int) (int, uint64) { var ( folder int - streams uint64 + streams uint64 = 1 ) - for folder, streams = range si.subStreamsInfo.streams { - total += streams - if uint64(file) < total { - break + if si.subStreamsInfo != nil { + for folder, streams = range si.subStreamsInfo.streams { + total += streams + if uint64(file) < total { + break + } } } @@ -233,7 +232,7 @@ func (si *streamsInfo) folderOffset(folder int) int64 { k += si.unpackInfo.folder[i].packedStreams } - return int64(si.packInfo.position + offset) + return int64(si.packInfo.position + offset) //nolint:gosec } //nolint:cyclop,funlen @@ -250,7 +249,7 @@ func (si *streamsInfo) FolderReader(r io.ReaderAt, folder int, password string) offset := int64(0) for i, input := range f.packed { - size := int64(si.packInfo.size[packedOffset+i]) + size := int64(si.packInfo.size[packedOffset+i]) //nolint:gosec in[input] = util.NopCloser(bufio.NewReader(io.NewSectionReader(r, si.folderOffset(folder)+offset, size))) offset += size } @@ -298,7 +297,7 @@ func (si *streamsInfo) FolderReader(r io.ReaderAt, folder int, password string) return nil, 0, errors.New("expecting one unbound output stream") } - fr := newFolderReadCloser(out[unbound[0]], int64(f.unpackSize())) + fr := newFolderReadCloser(out[unbound[0]], int64(f.unpackSize())) //nolint:gosec if si.unpackInfo.digest != nil { return fr, si.unpackInfo.digest[folder], nil @@ -325,8 +324,14 @@ type FileHeader struct { Attributes uint32 CRC32 uint32 UncompressedSize uint64 - isEmptyStream bool - isEmptyFile bool + + // Stream is an opaque identifier representing the compressed stream + // that contains the file. Any File with the same value can be assumed + // to be stored within the same stream. 
+ Stream int + + isEmptyStream bool + isEmptyFile bool } // FileInfo returns an fs.FileInfo for the FileHeader. @@ -339,7 +344,7 @@ type headerFileInfo struct { } func (fi headerFileInfo) Name() string { return path.Base(fi.fh.Name) } -func (fi headerFileInfo) Size() int64 { return int64(fi.fh.UncompressedSize) } +func (fi headerFileInfo) Size() int64 { return int64(fi.fh.UncompressedSize) } //nolint:gosec func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() } func (fi headerFileInfo) ModTime() time.Time { return fi.fh.Modified.UTC() } func (fi headerFileInfo) Mode() fs.FileMode { return fi.fh.Mode() } diff --git a/vendor/github.com/bodgit/sevenzip/types.go b/vendor/github.com/bodgit/sevenzip/types.go index b4556585..c518d415 100644 --- a/vendor/github.com/bodgit/sevenzip/types.go +++ b/vendor/github.com/bodgit/sevenzip/types.go @@ -46,8 +46,9 @@ const ( ) var ( - errIncompleteRead = errors.New("sevenzip: incomplete read") - errUnexpectedID = errors.New("sevenzip: unexpected id") + errIncompleteRead = errors.New("sevenzip: incomplete read") + errUnexpectedID = errors.New("sevenzip: unexpected id") + errMissingUnpackInfo = errors.New("sevenzip: missing unpack info") ) func readUint64(r io.ByteReader) (uint64, error) { @@ -131,24 +132,23 @@ func readSizes(r io.ByteReader, count uint64) ([]uint64, error) { return sizes, nil } -func readCRC(r util.Reader, count uint64) ([]uint32, []bool, error) { +func readCRC(r util.Reader, count uint64) ([]uint32, error) { defined, err := readOptionalBool(r, count) if err != nil { - return nil, nil, err + return nil, err } crcs := make([]uint32, count) - for i := uint64(0); i < count; i++ { - var crc uint32 - if err := binary.Read(r, binary.LittleEndian, &crc); err != nil { - return nil, nil, fmt.Errorf("readCRC: Read error: %w", err) + for i := range defined { + if defined[i] { + if err := binary.Read(r, binary.LittleEndian, &crcs[i]); err != nil { + return nil, fmt.Errorf("readCRC: Read error: %w", err) + } } - - crcs[i] = crc } - return crcs, defined, nil + return crcs, nil } //nolint:cyclop @@ -184,7 +184,7 @@ func readPackInfo(r util.Reader) (*packInfo, error) { } if id == idCRC { - if p.digest, p.defined, err = readCRC(r, p.streams); err != nil { + if p.digest, err = readCRC(r, p.streams); err != nil { return nil, err } @@ -240,7 +240,7 @@ func readCoder(r util.Reader) (*coder, error) { } c.properties = make([]byte, size) - if n, err := r.Read(c.properties); err != nil || n != int(size) { + if n, err := r.Read(c.properties); err != nil || uint64(n) != size { if err != nil { return nil, fmt.Errorf("readCoder: Read error: %w", err) } @@ -384,7 +384,7 @@ func readUnpackInfo(r util.Reader) (*unpackInfo, error) { } if id == idCRC { - if u.digest, u.defined, err = readCRC(r, folders); err != nil { + if u.digest, err = readCRC(r, folders); err != nil { return nil, err } @@ -461,7 +461,7 @@ func readSubStreamsInfo(r util.Reader, folder []*folder) (*subStreamsInfo, error } if id == idCRC { - if s.digest, s.defined, err = readCRC(r, files); err != nil { + if s.digest, err = readCRC(r, files); err != nil { return nil, err } @@ -510,6 +510,10 @@ func readStreamsInfo(r util.Reader) (*streamsInfo, error) { } if id == idSubStreamsInfo { + if s.unpackInfo == nil { + return nil, errMissingUnpackInfo + } + if s.subStreamsInfo, err = readSubStreamsInfo(r, s.unpackInfo.folder); err != nil { return nil, err } @@ -528,7 +532,7 @@ func readStreamsInfo(r util.Reader) (*streamsInfo, error) { } func readTimes(r util.Reader, count uint64) ([]time.Time, error) { - _, 
err := readOptionalBool(r, count) + defined, err := readOptionalBool(r, count) if err != nil { return nil, err } @@ -550,15 +554,17 @@ func readTimes(r util.Reader, count uint64) ([]time.Time, error) { return nil, errors.New("sevenzip: TODO readTimes external") //nolint:goerr113 } - times := make([]time.Time, 0, count) + times := make([]time.Time, count) - for i := uint64(0); i < count; i++ { - var ft windows.Filetime - if err := binary.Read(r, binary.LittleEndian, &ft); err != nil { - return nil, fmt.Errorf("readTimes: Read error: %w", err) - } + for i := range defined { + if defined[i] { + var ft windows.Filetime + if err := binary.Read(r, binary.LittleEndian, &ft); err != nil { + return nil, fmt.Errorf("readTimes: Read error: %w", err) + } - times = append(times, time.Unix(0, ft.Nanoseconds()).UTC()) + times[i] = time.Unix(0, ft.Nanoseconds()).UTC() + } } return times, nil @@ -599,7 +605,7 @@ func readNames(r util.Reader, count, length uint64) ([]string, error) { } utf16le := unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM) - scanner := bufio.NewScanner(transform.NewReader(io.LimitReader(r, int64(length-1)), utf16le.NewDecoder())) + scanner := bufio.NewScanner(transform.NewReader(io.LimitReader(r, int64(length-1)), utf16le.NewDecoder())) //nolint:gosec,lll scanner.Split(splitNull) names, i := make([]string, 0, count), uint64(0) @@ -620,7 +626,7 @@ func readNames(r util.Reader, count, length uint64) ([]string, error) { } func readAttributes(r util.Reader, count uint64) ([]uint32, error) { - _, err := readOptionalBool(r, count) + defined, err := readOptionalBool(r, count) if err != nil { return nil, err } @@ -643,9 +649,12 @@ func readAttributes(r util.Reader, count uint64) ([]uint32, error) { } attributes := make([]uint32, count) - for i := uint64(0); i < count; i++ { - if err := binary.Read(r, binary.LittleEndian, &attributes[i]); err != nil { - return nil, fmt.Errorf("readAttributes: Read error: %w", err) + + for i := range defined { + if defined[i] { + if err := binary.Read(r, binary.LittleEndian, &attributes[i]); err != nil { + return nil, fmt.Errorf("readAttributes: Read error: %w", err) + } } } @@ -705,8 +714,8 @@ func readFilesInfo(r util.Reader) (*filesInfo, error) { for i := range f.file { if f.file[i].isEmptyStream { f.file[i].isEmptyFile = empty[j] + j++ } - j++ } case idCTime: times, err := readTimes(r, files) @@ -756,7 +765,7 @@ func readFilesInfo(r util.Reader) (*filesInfo, error) { case idStartPos: return nil, errors.New("sevenzip: TODO idStartPos") //nolint:goerr113 case idDummy: - if _, err := io.CopyN(io.Discard, r, int64(length)); err != nil { + if _, err := io.CopyN(io.Discard, r, int64(length)); err != nil { //nolint:gosec return nil, fmt.Errorf("readFilesInfo: CopyN error: %w", err) } default: @@ -777,7 +786,7 @@ func readHeader(r util.Reader) (*header, error) { } if id == idArchiveProperties { - return nil, errors.New("sevenzip: TODO idArchiveProperties") //nolint:goerr113 + return nil, errors.New("sevenzip: TODO idArchiveProperties") //nolint:goerr113,revive //nolint:govet id, err = r.ReadByte() @@ -787,7 +796,7 @@ func readHeader(r util.Reader) (*header, error) { } if id == idAdditionalStreamsInfo { - return nil, errors.New("sevenzip: TODO idAdditionalStreamsInfo") //nolint:goerr113 + return nil, errors.New("sevenzip: TODO idAdditionalStreamsInfo") //nolint:goerr113,revive //nolint:govet id, err = r.ReadByte() @@ -822,6 +831,10 @@ func readHeader(r util.Reader) (*header, error) { return nil, errUnexpectedID } + if h.streamsInfo == nil || h.filesInfo == nil 
{
+		return h, nil
+	}
+
 	j := 0

 	for i := range h.filesInfo.file {
@@ -829,7 +842,10 @@
 			continue
 		}

-		h.filesInfo.file[i].CRC32 = h.streamsInfo.subStreamsInfo.digest[j]
+		if h.streamsInfo.subStreamsInfo != nil {
+			h.filesInfo.file[i].CRC32 = h.streamsInfo.subStreamsInfo.digest[j]
+		}
+
 		_, h.filesInfo.file[i].UncompressedSize = h.streamsInfo.FileFolderAndSize(j)
 		j++
 	}
diff --git a/vendor/github.com/bodgit/windows/.golangci.yaml b/vendor/github.com/bodgit/windows/.golangci.yaml
new file mode 100644
index 00000000..f74dd154
--- /dev/null
+++ b/vendor/github.com/bodgit/windows/.golangci.yaml
@@ -0,0 +1,13 @@
+---
+linters:
+  enable-all: true
+  disable:
+    - exhaustivestruct
+    - exhaustruct
+    - godox
+    - goerr113
+    - gomnd
+    - ireturn
+    - nonamedreturns
+    - varnamelen
+    - wrapcheck
diff --git a/vendor/github.com/bodgit/sevenzip/.goreleaser.yml b/vendor/github.com/bodgit/windows/.goreleaser.yml
similarity index 63%
rename from vendor/github.com/bodgit/sevenzip/.goreleaser.yml
rename to vendor/github.com/bodgit/windows/.goreleaser.yml
index 8c37d5dc..75e2a1f7 100644
--- a/vendor/github.com/bodgit/sevenzip/.goreleaser.yml
+++ b/vendor/github.com/bodgit/windows/.goreleaser.yml
@@ -3,3 +3,5 @@ builds:
 - skip: true
 release:
   prerelease: auto
+changelog:
+  use: github-native
diff --git a/vendor/github.com/bodgit/windows/.travis.yml b/vendor/github.com/bodgit/windows/.travis.yml
deleted file mode 100644
index aedc286f..00000000
--- a/vendor/github.com/bodgit/windows/.travis.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-language: go
-sudo: false
-go:
-  - tip
-before_install:
-  - go get github.com/mattn/goveralls
-script:
-  - $GOPATH/bin/goveralls -service=travis-ci
diff --git a/vendor/github.com/bodgit/windows/README.md b/vendor/github.com/bodgit/windows/README.md
index b35f7a82..1c3fb31a 100644
--- a/vendor/github.com/bodgit/windows/README.md
+++ b/vendor/github.com/bodgit/windows/README.md
@@ -1,11 +1,14 @@
-[![Build Status](https://travis-ci.com/bodgit/windows.svg?branch=master)](https://travis-ci.com/bodgit/windows)
-[![Coverage Status](https://coveralls.io/repos/github/bodgit/windows/badge.svg?branch=master)](https://coveralls.io/github/bodgit/windows?branch=master)
+[![GitHub release](https://img.shields.io/github/v/release/bodgit/windows)](https://github.com/bodgit/windows/releases)
+[![Build Status](https://img.shields.io/github/workflow/status/bodgit/windows/build)](https://github.com/bodgit/windows/actions?query=workflow%3Abuild)
+[![Coverage Status](https://coveralls.io/repos/github/bodgit/windows/badge.svg?branch=main)](https://coveralls.io/github/bodgit/windows?branch=main)
 [![Go Report Card](https://goreportcard.com/badge/github.com/bodgit/windows)](https://goreportcard.com/report/github.com/bodgit/windows)
 [![GoDoc](https://godoc.org/github.com/bodgit/windows?status.svg)](https://godoc.org/github.com/bodgit/windows)
+![Go version](https://img.shields.io/badge/Go-1.18-brightgreen.svg)
+![Go version](https://img.shields.io/badge/Go-1.17-brightgreen.svg)

 windows
 =======

 A collection of types native to Windows that are also useful on non-Windows platforms.

-The `FILETIME`-comparable type is the sole export which is a 1:1 copy of the one from `golang.org/x/sys/windows`. However, that package isn't available for all platforms and this particular type gets used in other protocols and file types such as NTLMv2 and 7-Zip.
+The `FILETIME`-equivalent type is the sole export; it is a 1:1 copy of the type found in the `golang.org/x/sys/windows` package.
That package only builds on `GOOS=windows` and this particular type gets used in other protocols and file types such as NTLMv2 and 7-zip. diff --git a/vendor/github.com/bodgit/windows/filetime.go b/vendor/github.com/bodgit/windows/filetime.go index 7beb183a..96e678d6 100644 --- a/vendor/github.com/bodgit/windows/filetime.go +++ b/vendor/github.com/bodgit/windows/filetime.go @@ -26,6 +26,7 @@ func (ft *Filetime) Nanoseconds() int64 { nsec -= offset // convert into nanoseconds nsec *= 100 + return nsec } @@ -38,5 +39,6 @@ func NsecToFiletime(nsec int64) (ft Filetime) { // split into high / low ft.LowDateTime = uint32(nsec & 0xffffffff) ft.HighDateTime = uint32(nsec >> 32 & 0xffffffff) + return ft } diff --git a/vendor/github.com/connesc/cipherio/.gitignore b/vendor/github.com/connesc/cipherio/.gitignore deleted file mode 100644 index 4170155f..00000000 --- a/vendor/github.com/connesc/cipherio/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/go.sum diff --git a/vendor/github.com/connesc/cipherio/LICENSE b/vendor/github.com/connesc/cipherio/LICENSE deleted file mode 100644 index 024bc59e..00000000 --- a/vendor/github.com/connesc/cipherio/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2020, Cédric Connes - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/connesc/cipherio/README.md b/vendor/github.com/connesc/cipherio/README.md deleted file mode 100644 index 850ceb6d..00000000 --- a/vendor/github.com/connesc/cipherio/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# cipherio - -[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c)](https://pkg.go.dev/github.com/connesc/cipherio) -[![Go Report Card](https://goreportcard.com/badge/github.com/connesc/cipherio)](https://goreportcard.com/report/github.com/connesc/cipherio) -[![GitHub tag](https://img.shields.io/github/v/tag/connesc/cipherio?sort=semver)](https://github.com/connesc/cipherio/tags) -[![License](https://img.shields.io/github/license/connesc/cipherio)](LICENSE) - -This Golang package allows to use block ciphers with `io.Reader` and `io.Writer`. - -Golang already provides [`io.Reader`](https://golang.org/pkg/io/#Reader) and [`io.Writer`](https://golang.org/pkg/io/#Writer) implementations for [`cipher.Stream`](https://golang.org/pkg/crypto/cipher/#Stream), but not for [`cipher.BlockMode`](https://golang.org/pkg/crypto/cipher/#BlockMode) (such as AES-CBC). The purpose of this package is to fill the gap. - -Block ciphers require data size to be a multiple of the block size. 
The `io.Reader` and `io.Writer` implementations found here can either enforce this requirement or automatically apply a user-defined padding. - -This package has been written with performance in mind: buffering and copies are avoided as much as possible. diff --git a/vendor/github.com/connesc/cipherio/doc.go b/vendor/github.com/connesc/cipherio/doc.go deleted file mode 100644 index f9e95dd0..00000000 --- a/vendor/github.com/connesc/cipherio/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Package cipherio allows to use block ciphers with io.Reader and io.Writer. -// -// Golang already provides io.Reader and io.Writer implementations for cipher.Stream, but not for -// cipher.BlockMode (such as AES-CBC). The purpose of this package is to fill the gap. -// -// Block ciphers require data size to be a multiple of the block size. The io.Reader and io.Writer -// implementations found here can either enforce this requirement or automatically apply a -// user-defined padding. -// -// This package has been written with performance in mind: buffering and copies are avoided as much -// as possible. -package cipherio diff --git a/vendor/github.com/connesc/cipherio/padding.go b/vendor/github.com/connesc/cipherio/padding.go deleted file mode 100644 index ba5338b0..00000000 --- a/vendor/github.com/connesc/cipherio/padding.go +++ /dev/null @@ -1,54 +0,0 @@ -package cipherio - -import "fmt" - -// Padding defines how to fill an incomplete block. -type Padding interface { - Fill(dst []byte) -} - -// PaddingFunc allows to implement the Padding interface with a padding function. -type PaddingFunc func(dst []byte) - -// Fill an incomplete block. -func (p PaddingFunc) Fill(dst []byte) { - p(dst) -} - -// ZeroPadding fills an incomplete block with zeroes. -var ZeroPadding = PaddingFunc(zeroPadding) - -// BitPadding fills an incomplete block with 0x80 followed by zeroes. -// -// This is defined by ISO/IEC 9797-1 as Padding Method 2 and is also known as ISO padding. -var BitPadding = PaddingFunc(bitPadding) - -// PKCS7Padding fills an incomplete block by repeating the total number of padding bytes. -// -// PKCS#7 is described by RFC 5652. -// -// WARNING: this padding method MUST NOT be used with a block size larger than 256 bytes. -var PKCS7Padding = PaddingFunc(pkcs7Padding) - -func fill(dst []byte, val byte) { - for i := range dst { - dst[i] = val - } -} - -func zeroPadding(dst []byte) { - fill(dst, 0) -} - -func bitPadding(dst []byte) { - dst[0] = 0x80 - fill(dst[1:], 0) -} - -func pkcs7Padding(dst []byte) { - n := len(dst) - if n > 255 { - panic(fmt.Errorf("cipherio: PKCS#7 padding cannot fill more than 255 bytes: %d > 255", n)) - } - fill(dst, byte(n)) -} diff --git a/vendor/github.com/connesc/cipherio/reader.go b/vendor/github.com/connesc/cipherio/reader.go deleted file mode 100644 index e471e647..00000000 --- a/vendor/github.com/connesc/cipherio/reader.go +++ /dev/null @@ -1,178 +0,0 @@ -package cipherio - -import ( - "crypto/cipher" - "io" -) - -type blockReader struct { - src io.Reader - blockMode cipher.BlockMode - padding Padding - blockSize int - buf []byte // used to store remaining bytes (before or after crypting) - crypted int // if > 0, then buf contains remaining crypted bytes - err error -} - -// NewBlockReader wraps the given Reader to add on-the-fly encryption or decryption using the -// given BlockMode. -// -// Data must be aligned to the cipher block size: ErrUnexpectedEOF is returned if EOF is reached in -// the middle of a block. -// -// This Reader avoids buffering and copies as much as possible. 
A call to Read leads to at most -// one Read from the wrapped Reader. Unless the destination buffer is smaller than BlockSize, -// (en|de)cryption happens inplace within it. -// -// There is no dynamic allocation: an internal buffer of BlockSize bytes is used to store both -// incomplete blocks (not yet (en|de)crypted) and partially read blocks (already (en|de)crypted). -// -// The wrapped Reader is guaranteed to never be consumed beyond the last requested block. This -// means that it is safe to stop reading from this Reader at a block boundary and then resume -// reading from the wrapped Reader for another purpose. -func NewBlockReader(src io.Reader, blockMode cipher.BlockMode) io.Reader { - return NewBlockReaderWithPadding(src, blockMode, nil) -} - -// NewBlockReaderWithPadding is similar to NewBlockReader, except that any incomplete block is -// filled with the given padding instead of returning ErrUnexpectedEOF. -func NewBlockReaderWithPadding(src io.Reader, blockMode cipher.BlockMode, padding Padding) io.Reader { - blockSize := blockMode.BlockSize() - - return &blockReader{ - src: src, - blockMode: blockMode, - padding: padding, - blockSize: blockSize, - buf: make([]byte, 0, blockSize), - crypted: 0, - err: nil, - } -} - -func (r *blockReader) readCryptedBuf(p []byte) int { - n := copy(p, r.buf[r.blockSize-r.crypted:]) - r.crypted -= n - return n -} - -func (r *blockReader) Read(p []byte) (int, error) { - count := 0 - - // Read previously crypted bytes, even if an error has already been encountered. Stop early if - // the crypted buffer cannot be entirely consumed. - if r.crypted > 0 { - n := r.readCryptedBuf(p) - p = p[n:] - count += n - if r.crypted > 0 { - return count, nil - } - r.buf = r.buf[:0] - } - // At this point, the internal buffer cannot contain crypted bytes anymore. - - // Return the previously saved error, if any. - if r.err != nil { - return count, r.err - } - - // Stop early if there is no more space in the destination buffer. - if len(p) == 0 { - return count, nil - } - - // If the destination buffer is smaller than BlockSize, then use the internal buffer. - if len(p) < r.blockSize { - // The internal buffer may already contain some bytes, try to fill the rest with a single - // Read. - n, err := r.src.Read(r.buf[len(r.buf):r.blockSize]) - r.buf = r.buf[:len(r.buf)+n] - - // Apply padding if EOF is reached in the middle of a block. - if err == io.EOF && len(r.buf) < r.blockSize && r.padding != nil { - r.padding.Fill(r.buf[len(r.buf):r.blockSize]) - r.buf = r.buf[:r.blockSize] - } - - // Crypt the buffered block if complete, then fill the destination buffer with the first - // crypted bytes. - if len(r.buf) == r.blockSize { - r.blockMode.CryptBlocks(r.buf, r.buf) - r.crypted = r.blockSize - count += r.readCryptedBuf(p) - } - - // Save any encountered error. - r.err = err - - if r.crypted > 0 { - // Hide any error until crypted bytes have been entirely consumed. - err = nil - } else if err == io.EOF && len(r.buf) > 0 { - // If EOF is reached in the middle of a block, convert it to ErrUnexpectedEOF. - err = io.ErrUnexpectedEOF - r.err = err - } - return count, err - } - // Otherwise, use the destination buffer. - - // Initialize the destination buffer with buffered bytes, then try to fill the rest with a - // single Read. - copy(p, r.buf) - n, err := r.src.Read(p[len(r.buf):]) - available := len(r.buf) + n - exceeding := available % r.blockSize - cryptable := available - exceeding - - // Crypt all complete blocks. 
- if cryptable > 0 { - r.blockMode.CryptBlocks(p[:cryptable], p[:cryptable]) - p = p[cryptable:] - count += cryptable - } - - // Store exceeding bytes to the internal buffer. - r.buf = r.buf[:exceeding] - copy(r.buf, p) - // At this point, both the destination and the internal buffers contain the exceeding bytes. - - // Save any encountered error. - r.err = err - - // Handle EOF when encountered in the middle of a block. - if err == io.EOF && exceeding > 0 { - if r.padding == nil { - // If no padding is defined, convert EOF to ErrUnexpectedEOF. - err = io.ErrUnexpectedEOF - r.err = err - - } else if len(p) < r.blockSize { - // If padding does not fit the destination buffer, then use the internal buffer. - r.padding.Fill(r.buf[exceeding:r.blockSize]) - r.buf = r.buf[:r.blockSize] - - // Crypt the padded block, then fill the rest of the destination buffer with the first - // crypted bytes. - r.blockMode.CryptBlocks(r.buf, r.buf) - r.crypted = r.blockSize - count += r.readCryptedBuf(p) - - // Hide any error until crypted bytes have been entirely consumed. - if r.crypted > 0 { - err = nil - } - - } else { - // Otherwise, apply padding to the destination buffer and crypt the padded block. - r.padding.Fill(p[exceeding:r.blockSize]) - r.buf = r.buf[:0] - r.blockMode.CryptBlocks(p[:r.blockSize], p[:r.blockSize]) - count += r.blockSize - } - } - - return count, err -} diff --git a/vendor/github.com/connesc/cipherio/writer.go b/vendor/github.com/connesc/cipherio/writer.go deleted file mode 100644 index 9f200de7..00000000 --- a/vendor/github.com/connesc/cipherio/writer.go +++ /dev/null @@ -1,151 +0,0 @@ -package cipherio - -import ( - "crypto/cipher" - "io" -) - -type blockWriter struct { - dst io.Writer - blockMode cipher.BlockMode - padding Padding - blockSize int - buf []byte // used to store both incomplete and crypted blocks - err error -} - -// NewBlockWriter wraps the given Writer to add on-the-fly encryption or decryption using the -// given BlockMode. -// -// Data must be aligned to the cipher block size: ErrUnexpectedEOF is returned if Close is called -// in the middle of a block. -// -// This Writer allocates an internal buffer of 1024 blocks, which is freed when an error is -// encountered or when Close is called. Other than that, there is no dynamic allocation. -// -// Close must be called at least once. After that, Close becomes a no-op and Write must not be -// called anymore. -func NewBlockWriter(dst io.Writer, blockMode cipher.BlockMode) io.WriteCloser { - return NewBlockWriterWithPadding(dst, blockMode, nil) -} - -// NewBlockWriterWithPadding is similar to NewBlockWriter, except that Close fills any incomplete -// block with the given padding instead of returning ErrUnexpectedEOF. -func NewBlockWriterWithPadding(dst io.Writer, blockMode cipher.BlockMode, padding Padding) io.WriteCloser { - blockSize := blockMode.BlockSize() - - return &blockWriter{ - dst: dst, - blockMode: blockMode, - padding: padding, - blockSize: blockSize, - buf: make([]byte, 0, 1024*blockSize), - err: nil, - } -} - -func (w *blockWriter) Write(p []byte) (int, error) { - count := 0 - - // Return the previously saved error, if any. - if w.err != nil { - return count, w.err - } - - // While complete blocks are available, crypt as many as possible in the internal buffer and - // write the result to the destination writer. - for len(w.buf)+len(p) >= w.blockSize { - // Initialize src with remaining bytes. - src := w.buf - remaining := len(src) - - // Clear remaining bytes. 
- w.buf = w.buf[:0] - - // If src contains an incomplete block then fill it and crypt it inplace. - if remaining > 0 { - src = src[:w.blockSize] - - copied := copy(src[remaining:], p) - p = p[copied:] - - w.blockMode.CryptBlocks(src, src) - } - - // Otherwise, determine how many complete blocks can be stored in src. - cryptable := cap(src) - len(src) - if len(p) < cryptable { - cryptable = (len(p) / w.blockSize) * w.blockSize - } - - // If any, crypt them and store the result in src at the same time. This avoids a - // preliminary copy. - if cryptable > 0 { - w.blockMode.CryptBlocks(src[len(src):cap(src)], p[:cryptable]) - p = p[cryptable:] - src = src[:len(src)+cryptable] - } - - // Now that src is filled with crypted blocks, write them to the destination writer. - n, err := w.dst.Write(src) - - // Count written bytes, except those that come from the internal buffer, because they have - // already been aknowledged by the previous call. - if n > remaining { - count += n - remaining - } - - // If any error is encountered, save it, free the internal buffer and stop immediately. - if err != nil { - w.err = err - w.buf = nil - return count, err - } - } - - // If an incomplete block remains, store it in the internal buffer and consider it as written. - if len(p) > 0 { - remaining := len(w.buf) - w.buf = w.buf[:remaining+len(p)] - copied := copy(w.buf[remaining:], p) - count += copied - } - - return count, nil -} - -func (w *blockWriter) Close() error { - // Return the previously saved error, if any. - if w.err != nil { - return w.err - } - - // Initialize src with remaining bytes. - src := w.buf - remaining := len(src) - - // Free the internal buffer. - w.buf = nil - - // Stop early if the internal buffer does not contain an incomplete block. - if remaining == 0 { - return nil - } - - // Return ErrUnexpectedEOF if no padding is defined. - if w.padding == nil { - w.err = io.ErrUnexpectedEOF - return w.err - } - - // Fill the incomplete block with padding. - src = src[:w.blockSize] - w.padding.Fill(src[remaining:]) - - // Crypt the last block inplace. - w.blockMode.CryptBlocks(src, src) - - // Write the last block to the destination writer. - _, w.err = w.dst.Write(src) - return w.err -} diff --git a/vendor/github.com/dsnet/compress/bzip2/bwt.go b/vendor/github.com/dsnet/compress/bzip2/bwt.go index 44a2541f..4ed2d744 100644 --- a/vendor/github.com/dsnet/compress/bzip2/bwt.go +++ b/vendor/github.com/dsnet/compress/bzip2/bwt.go @@ -15,6 +15,7 @@ import "github.com/dsnet/compress/bzip2/internal/sais" // Transform, such that a SA can be converted to a BWT in O(n) time. // // References: +// // http://www.hpl.hp.com/techreports/Compaq-DEC/SRC-RR-124.pdf // https://github.com/cscott/compressjs/blob/master/lib/BWT.js // https://www.quora.com/How-can-I-optimize-burrows-wheeler-transform-and-inverse-transform-to-work-in-O-n-time-O-n-space diff --git a/vendor/github.com/dsnet/compress/bzip2/common.go b/vendor/github.com/dsnet/compress/bzip2/common.go index c6339815..ae4c966e 100644 --- a/vendor/github.com/dsnet/compress/bzip2/common.go +++ b/vendor/github.com/dsnet/compress/bzip2/common.go @@ -5,9 +5,11 @@ // Package bzip2 implements the BZip2 compressed data format. 
// // Canonical C implementation: +// http://bzip.org // // Unofficial format specification: +// https://github.com/dsnet/compress/blob/master/doc/bzip2-format.pdf package bzip2 diff --git a/vendor/github.com/dsnet/compress/bzip2/fuzz_off.go b/vendor/github.com/dsnet/compress/bzip2/fuzz_off.go index ddd32f50..ec894e2e 100644 --- a/vendor/github.com/dsnet/compress/bzip2/fuzz_off.go +++ b/vendor/github.com/dsnet/compress/bzip2/fuzz_off.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE.md file. +//go:build !gofuzz // +build !gofuzz // This file exists to suppress fuzzing details from release builds. diff --git a/vendor/github.com/dsnet/compress/bzip2/fuzz_on.go b/vendor/github.com/dsnet/compress/bzip2/fuzz_on.go index 54122351..0bae7718 100644 --- a/vendor/github.com/dsnet/compress/bzip2/fuzz_on.go +++ b/vendor/github.com/dsnet/compress/bzip2/fuzz_on.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE.md file. +//go:build gofuzz // +build gofuzz // This file exists to export internal implementation details for fuzz testing. diff --git a/vendor/github.com/dsnet/compress/bzip2/mtf_rle2.go b/vendor/github.com/dsnet/compress/bzip2/mtf_rle2.go index 5c71b343..8f5c1ac9 100644 --- a/vendor/github.com/dsnet/compress/bzip2/mtf_rle2.go +++ b/vendor/github.com/dsnet/compress/bzip2/mtf_rle2.go @@ -14,6 +14,7 @@ import "github.com/dsnet/compress/internal/errors" // normal two's complement arithmetic. The methodology for doing so is below. // // Assuming the following: +// // num: The value being encoded by RLE encoding. // run: A sequence of RUNA and RUNB symbols represented as a binary integer, // where RUNA is the 0 bit, RUNB is the 1 bit, and least-significant RUN @@ -21,6 +22,7 @@ import "github.com/dsnet/compress/internal/errors" // cnt: The number of RUNA and RUNB symbols. // // Then the RLE encoding used by bzip2 has this mathematical property: +// num+1 == (1<<cnt) + run // 11110 <=> 4 // 111110 <=> 5 // 111111 <=> 6 Invalid tree index, so should fail -// var encSel, decSel = func() (e prefix.Encoder, d prefix.Decoder) { var selCodes [maxNumTrees + 1]prefix.PrefixCode for i := range selCodes { @@ -150,6 +149,7 @@ func (pw *prefixWriter) WritePrefixCodes(codes []prefix.PrefixCodes, trees []pre // handleDegenerateCodes converts a degenerate tree into a canonical tree. // // For example, when the input is an under-subscribed tree: +// input: []PrefixCode{ // {Sym: 0, Len: 3}, // {Sym: 1, Len: 4}, @@ -165,6 +165,7 @@ func (pw *prefixWriter) WritePrefixCodes(codes []prefix.PrefixCodes, trees []pre // } // // For example, when the input is an over-subscribed tree: +// input: []PrefixCode{ // {Sym: 0, Len: 1}, // {Sym: 1, Len: 3}, diff --git a/vendor/github.com/dsnet/compress/bzip2/rle1.go b/vendor/github.com/dsnet/compress/bzip2/rle1.go index 1d789f65..b96f0cfc 100644 --- a/vendor/github.com/dsnet/compress/bzip2/rle1.go +++ b/vendor/github.com/dsnet/compress/bzip2/rle1.go @@ -17,9 +17,11 @@ var rleDone = errorf(errors.Unknown, "RLE1 stage is completed") // run lengths of 256..259. The decoder can handle the latter case.
// // For example, if the input was: +// // input: "AAAAAAABBBBCCCD" // // Then the output will be: +// // output: "AAAA\x03BBBB\x00CCCD" type runLengthEncoding struct { buf []byte diff --git a/vendor/github.com/dsnet/compress/internal/debug.go b/vendor/github.com/dsnet/compress/internal/debug.go index 01df1f89..92435377 100644 --- a/vendor/github.com/dsnet/compress/internal/debug.go +++ b/vendor/github.com/dsnet/compress/internal/debug.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE.md file. +//go:build debug && !gofuzz // +build debug,!gofuzz package internal diff --git a/vendor/github.com/dsnet/compress/internal/errors/errors.go b/vendor/github.com/dsnet/compress/internal/errors/errors.go index c631afbd..daf3fe93 100644 --- a/vendor/github.com/dsnet/compress/internal/errors/errors.go +++ b/vendor/github.com/dsnet/compress/internal/errors/errors.go @@ -17,6 +17,7 @@ // recover from errors only generated from within this repository. // // Example usage: +// // func Foo() (err error) { // defer errors.Recover(&err) // @@ -28,7 +29,6 @@ // errors.Panic(errors.New("whoopsie")) // } // } -// package errors import "strings" diff --git a/vendor/github.com/dsnet/compress/internal/gofuzz.go b/vendor/github.com/dsnet/compress/internal/gofuzz.go index 5035c9d6..38f44d0e 100644 --- a/vendor/github.com/dsnet/compress/internal/gofuzz.go +++ b/vendor/github.com/dsnet/compress/internal/gofuzz.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE.md file. +//go:build gofuzz // +build gofuzz package internal diff --git a/vendor/github.com/dsnet/compress/internal/prefix/debug.go b/vendor/github.com/dsnet/compress/internal/prefix/debug.go index 04fce70b..2a1cb25a 100644 --- a/vendor/github.com/dsnet/compress/internal/prefix/debug.go +++ b/vendor/github.com/dsnet/compress/internal/prefix/debug.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE.md file. +//go:build debug // +build debug package prefix diff --git a/vendor/github.com/dsnet/compress/internal/prefix/prefix.go b/vendor/github.com/dsnet/compress/internal/prefix/prefix.go index c73e748e..0c333f92 100644 --- a/vendor/github.com/dsnet/compress/internal/prefix/prefix.go +++ b/vendor/github.com/dsnet/compress/internal/prefix/prefix.go @@ -91,8 +91,8 @@ func (pc PrefixCodes) checkPrefixes() bool { // checkCanonical reports whether all codes are canonical. // That is, they have the following properties: // -// 1. All codes of a given bit-length are consecutive values. -// 2. Shorter codes lexicographically precede longer codes. +// 1. All codes of a given bit-length are consecutive values. +// 2. Shorter codes lexicographically precede longer codes. // // The codes must have unique symbols and be sorted by the symbol // The Len and Val fields in each code must be populated. diff --git a/vendor/github.com/dsnet/compress/internal/prefix/range.go b/vendor/github.com/dsnet/compress/internal/prefix/range.go index b7eddad5..15ec9343 100644 --- a/vendor/github.com/dsnet/compress/internal/prefix/range.go +++ b/vendor/github.com/dsnet/compress/internal/prefix/range.go @@ -37,6 +37,7 @@ func (rcs RangeCodes) End() uint32 { return rcs[len(rcs)-1].End() } // checkValid reports whether the RangeCodes is valid. 
In order to be valid, // the following must hold true: +// // rcs[i-1].Base <= rcs[i].Base // rcs[i-1].End <= rcs[i].End // rcs[i-1].End >= rcs[i].Base diff --git a/vendor/github.com/dsnet/compress/internal/release.go b/vendor/github.com/dsnet/compress/internal/release.go index 0990be1c..2d25f2fa 100644 --- a/vendor/github.com/dsnet/compress/internal/release.go +++ b/vendor/github.com/dsnet/compress/internal/release.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE.md file. +//go:build !debug && !gofuzz // +build !debug,!gofuzz package internal diff --git a/vendor/github.com/hashicorp/golang-lru/v2/.gitignore b/vendor/github.com/hashicorp/golang-lru/v2/.gitignore new file mode 100644 index 00000000..83656241 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/hashicorp/golang-lru/v2/.golangci.yml b/vendor/github.com/hashicorp/golang-lru/v2/.golangci.yml new file mode 100644 index 00000000..7e7b8a96 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/.golangci.yml @@ -0,0 +1,46 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +linters: + fast: false + disable-all: true + enable: + - revive + - megacheck + - govet + - unconvert + - gas + - gocyclo + - dupl + - misspell + - unparam + - unused + - typecheck + - ineffassign + # - stylecheck + - exportloopref + - gocritic + - nakedret + - gosimple + - prealloc + +# golangci-lint configuration file +linters-settings: + revive: + ignore-generated-header: true + severity: warning + rules: + - name: package-comments + severity: warning + disabled: true + - name: exported + severity: warning + disabled: false + arguments: ["checkPrivateReceivers", "disableStutteringCheck"] + +issues: + exclude-use-default: false + exclude-rules: + - path: _test\.go + linters: + - dupl diff --git a/vendor/github.com/hashicorp/golang-lru/v2/2q.go b/vendor/github.com/hashicorp/golang-lru/v2/2q.go new file mode 100644 index 00000000..8c95252b --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/2q.go @@ -0,0 +1,267 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package lru + +import ( + "errors" + "sync" + + "github.com/hashicorp/golang-lru/v2/simplelru" +) + +const ( + // Default2QRecentRatio is the ratio of the 2Q cache dedicated + // to recently added entries that have only been accessed once. + Default2QRecentRatio = 0.25 + + // Default2QGhostEntries is the default ratio of ghost + // entries kept to track entries recently evicted + Default2QGhostEntries = 0.50 +) + +// TwoQueueCache is a thread-safe fixed size 2Q cache. +// 2Q is an enhancement over the standard LRU cache +// in that it tracks both frequently and recently used +// entries separately. This avoids a burst in access to new +// entries from evicting frequently used entries. It adds some +// additional tracking overhead to the standard LRU cache, and is +// computationally about 2x the cost, and adds some metadata over +// head. The ARCCache is similar, but does not require setting any +// parameters. 
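(Editorial aside: the 2Q admission policy described above is easiest to see in a short sketch. This is an illustration built only from the `New2Q`, `Add`, `Get`, and `Len` signatures defined in this file.)

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

func main() {
	// 128 total entries, split between the "recent" and "frequent" queues
	// using the default ratios declared above.
	cache, err := lru.New2Q[string, int](128)
	if err != nil {
		panic(err)
	}

	cache.Add("a", 1) // first touch: lands in the recent queue
	cache.Add("b", 2)

	// A Get on a recent entry promotes it to the frequent queue, so a burst
	// of brand-new keys cannot easily evict it.
	if v, ok := cache.Get("a"); ok {
		fmt.Println("a =", v)
	}

	fmt.Println("len =", cache.Len())
}
```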
+type TwoQueueCache[K comparable, V any] struct { + size int + recentSize int + recentRatio float64 + ghostRatio float64 + + recent simplelru.LRUCache[K, V] + frequent simplelru.LRUCache[K, V] + recentEvict simplelru.LRUCache[K, struct{}] + lock sync.RWMutex +} + +// New2Q creates a new TwoQueueCache using the default +// values for the parameters. +func New2Q[K comparable, V any](size int) (*TwoQueueCache[K, V], error) { + return New2QParams[K, V](size, Default2QRecentRatio, Default2QGhostEntries) +} + +// New2QParams creates a new TwoQueueCache using the provided +// parameter values. +func New2QParams[K comparable, V any](size int, recentRatio, ghostRatio float64) (*TwoQueueCache[K, V], error) { + if size <= 0 { + return nil, errors.New("invalid size") + } + if recentRatio < 0.0 || recentRatio > 1.0 { + return nil, errors.New("invalid recent ratio") + } + if ghostRatio < 0.0 || ghostRatio > 1.0 { + return nil, errors.New("invalid ghost ratio") + } + + // Determine the sub-sizes + recentSize := int(float64(size) * recentRatio) + evictSize := int(float64(size) * ghostRatio) + + // Allocate the LRUs + recent, err := simplelru.NewLRU[K, V](size, nil) + if err != nil { + return nil, err + } + frequent, err := simplelru.NewLRU[K, V](size, nil) + if err != nil { + return nil, err + } + recentEvict, err := simplelru.NewLRU[K, struct{}](evictSize, nil) + if err != nil { + return nil, err + } + + // Initialize the cache + c := &TwoQueueCache[K, V]{ + size: size, + recentSize: recentSize, + recentRatio: recentRatio, + ghostRatio: ghostRatio, + recent: recent, + frequent: frequent, + recentEvict: recentEvict, + } + return c, nil +} + +// Get looks up a key's value from the cache. +func (c *TwoQueueCache[K, V]) Get(key K) (value V, ok bool) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if this is a frequent value + if val, ok := c.frequent.Get(key); ok { + return val, ok + } + + // If the value is contained in recent, then we + // promote it to frequent + if val, ok := c.recent.Peek(key); ok { + c.recent.Remove(key) + c.frequent.Add(key, val) + return val, ok + } + + // No hit + return +} + +// Add adds a value to the cache. 
+func (c *TwoQueueCache[K, V]) Add(key K, value V) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if the value is frequently used already, + // and just update the value + if c.frequent.Contains(key) { + c.frequent.Add(key, value) + return + } + + // Check if the value is recently used, and promote + // the value into the frequent list + if c.recent.Contains(key) { + c.recent.Remove(key) + c.frequent.Add(key, value) + return + } + + // If the value was recently evicted, add it to the + // frequently used list + if c.recentEvict.Contains(key) { + c.ensureSpace(true) + c.recentEvict.Remove(key) + c.frequent.Add(key, value) + return + } + + // Add to the recently seen list + c.ensureSpace(false) + c.recent.Add(key, value) +} + +// ensureSpace is used to ensure we have space in the cache +func (c *TwoQueueCache[K, V]) ensureSpace(recentEvict bool) { + // If we have space, nothing to do + recentLen := c.recent.Len() + freqLen := c.frequent.Len() + if recentLen+freqLen < c.size { + return + } + + // If the recent buffer is larger than + // the target, evict from there + if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) { + k, _, _ := c.recent.RemoveOldest() + c.recentEvict.Add(k, struct{}{}) + return + } + + // Remove from the frequent list otherwise + c.frequent.RemoveOldest() +} + +// Len returns the number of items in the cache. +func (c *TwoQueueCache[K, V]) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.recent.Len() + c.frequent.Len() +} + +// Resize changes the cache size. +func (c *TwoQueueCache[K, V]) Resize(size int) (evicted int) { + c.lock.Lock() + defer c.lock.Unlock() + + // Recalculate the sub-sizes + recentSize := int(float64(size) * c.recentRatio) + evictSize := int(float64(size) * c.ghostRatio) + c.size = size + c.recentSize = recentSize + + // ensureSpace + diff := c.recent.Len() + c.frequent.Len() - size + if diff < 0 { + diff = 0 + } + for i := 0; i < diff; i++ { + c.ensureSpace(true) + } + + // Reallocate the LRUs + c.recent.Resize(size) + c.frequent.Resize(size) + c.recentEvict.Resize(evictSize) + + return diff +} + +// Keys returns a slice of the keys in the cache. +// The frequently used keys are first in the returned slice. +func (c *TwoQueueCache[K, V]) Keys() []K { + c.lock.RLock() + defer c.lock.RUnlock() + k1 := c.frequent.Keys() + k2 := c.recent.Keys() + return append(k1, k2...) +} + +// Values returns a slice of the values in the cache. +// The frequently used values are first in the returned slice. +func (c *TwoQueueCache[K, V]) Values() []V { + c.lock.RLock() + defer c.lock.RUnlock() + v1 := c.frequent.Values() + v2 := c.recent.Values() + return append(v1, v2...) +} + +// Remove removes the provided key from the cache. +func (c *TwoQueueCache[K, V]) Remove(key K) { + c.lock.Lock() + defer c.lock.Unlock() + if c.frequent.Remove(key) { + return + } + if c.recent.Remove(key) { + return + } + if c.recentEvict.Remove(key) { + return + } +} + +// Purge is used to completely clear the cache. +func (c *TwoQueueCache[K, V]) Purge() { + c.lock.Lock() + defer c.lock.Unlock() + c.recent.Purge() + c.frequent.Purge() + c.recentEvict.Purge() +} + +// Contains is used to check if the cache contains a key +// without updating recency or frequency. +func (c *TwoQueueCache[K, V]) Contains(key K) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.frequent.Contains(key) || c.recent.Contains(key) +} + +// Peek is used to inspect the cache value of a key +// without updating recency or frequency. 
+func (c *TwoQueueCache[K, V]) Peek(key K) (value V, ok bool) { + c.lock.RLock() + defer c.lock.RUnlock() + if val, ok := c.frequent.Peek(key); ok { + return val, ok + } + return c.recent.Peek(key) +} diff --git a/vendor/github.com/hashicorp/golang-lru/v2/LICENSE b/vendor/github.com/hashicorp/golang-lru/v2/LICENSE new file mode 100644 index 00000000..0e5d580e --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/LICENSE @@ -0,0 +1,364 @@ +Copyright (c) 2014 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. 
License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. 
You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. 
Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. 
Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/golang-lru/v2/README.md b/vendor/github.com/hashicorp/golang-lru/v2/README.md new file mode 100644 index 00000000..a942eb53 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/README.md @@ -0,0 +1,79 @@ +golang-lru +========== + +This provides the `lru` package which implements a fixed-size +thread safe LRU cache. It is based on the cache in Groupcache. + +Documentation +============= + +Full docs are available on [Go Packages](https://pkg.go.dev/github.com/hashicorp/golang-lru/v2) + +LRU cache example +================= + +```go +package main + +import ( + "fmt" + "github.com/hashicorp/golang-lru/v2" +) + +func main() { + l, _ := lru.New[int, any](128) + for i := 0; i < 256; i++ { + l.Add(i, nil) + } + if l.Len() != 128 { + panic(fmt.Sprintf("bad len: %v", l.Len())) + } +} +``` + +Expirable LRU cache example +=========================== + +```go +package main + +import ( + "fmt" + "time" + + "github.com/hashicorp/golang-lru/v2/expirable" +) + +func main() { + // make cache with 10ms TTL and 5 max keys + cache := expirable.NewLRU[string, string](5, nil, time.Millisecond*10) + + + // set value under key1. 
+ cache.Add("key1", "val1") + + // get value under key1 + r, ok := cache.Get("key1") + + // check for OK value + if ok { + fmt.Printf("value before expiration is found: %v, value: %q\n", ok, r) + } + + // wait for cache to expire + time.Sleep(time.Millisecond * 12) + + // get value under key1 after key expiration + r, ok = cache.Get("key1") + fmt.Printf("value after expiration is found: %v, value: %q\n", ok, r) + + // set value under key2, would evict old entry because it is already expired. + cache.Add("key2", "val2") + + fmt.Printf("Cache len: %d\n", cache.Len()) + // Output: + // value before expiration is found: true, value: "val1" + // value after expiration is found: false, value: "" + // Cache len: 1 +} +``` diff --git a/vendor/github.com/hashicorp/golang-lru/v2/doc.go b/vendor/github.com/hashicorp/golang-lru/v2/doc.go new file mode 100644 index 00000000..24107ee0 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/doc.go @@ -0,0 +1,24 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package lru provides three different LRU caches of varying sophistication. +// +// Cache is a simple LRU cache. It is based on the LRU implementation in +// groupcache: https://github.com/golang/groupcache/tree/master/lru +// +// TwoQueueCache tracks frequently used and recently used entries separately. +// This avoids a burst of accesses from taking out frequently used entries, at +// the cost of about 2x computational overhead and some extra bookkeeping. +// +// ARCCache is an adaptive replacement cache. It tracks recent evictions as well +// as recent usage in both the frequent and recent caches. Its computational +// overhead is comparable to TwoQueueCache, but the memory overhead is linear +// with the size of the cache. +// +// ARC has been patented by IBM, so do not use it if that is problematic for +// your program. For this reason, it is in a separate go module contained within +// this repository. +// +// All caches in this package take locks while operating, and are therefore +// thread-safe for consumers. +package lru diff --git a/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go b/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go new file mode 100644 index 00000000..5cd74a03 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go @@ -0,0 +1,142 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE_list file. + +package internal + +import "time" + +// Entry is an LRU Entry +type Entry[K comparable, V any] struct { + // Next and previous pointers in the doubly-linked list of elements. + // To simplify the implementation, internally a list l is implemented + // as a ring, such that &l.root is both the next element of the last + // list element (l.Back()) and the previous element of the first list + // element (l.Front()). + next, prev *Entry[K, V] + + // The list to which this element belongs. + list *LruList[K, V] + + // The LRU Key of this element. + Key K + + // The Value stored with this element. + Value V + + // The time this element would be cleaned up, optional + ExpiresAt time.Time + + // The expiry bucket item was put in, optional + ExpireBucket uint8 +} + +// PrevEntry returns the previous list element or nil. +func (e *Entry[K, V]) PrevEntry() *Entry[K, V] { + if p := e.prev; e.list != nil && p != &e.list.root { + return p + } + return nil +} + +// LruList represents a doubly linked list. 
+// The zero Value for LruList is an empty list ready to use. +type LruList[K comparable, V any] struct { + root Entry[K, V] // sentinel list element, only &root, root.prev, and root.next are used + len int // current list Length excluding (this) sentinel element +} + +// Init initializes or clears list l. +func (l *LruList[K, V]) Init() *LruList[K, V] { + l.root.next = &l.root + l.root.prev = &l.root + l.len = 0 + return l +} + +// NewList returns an initialized list. +func NewList[K comparable, V any]() *LruList[K, V] { return new(LruList[K, V]).Init() } + +// Length returns the number of elements of list l. +// The complexity is O(1). +func (l *LruList[K, V]) Length() int { return l.len } + +// Back returns the last element of list l or nil if the list is empty. +func (l *LruList[K, V]) Back() *Entry[K, V] { + if l.len == 0 { + return nil + } + return l.root.prev +} + +// lazyInit lazily initializes a zero List Value. +func (l *LruList[K, V]) lazyInit() { + if l.root.next == nil { + l.Init() + } +} + +// insert inserts e after at, increments l.len, and returns e. +func (l *LruList[K, V]) insert(e, at *Entry[K, V]) *Entry[K, V] { + e.prev = at + e.next = at.next + e.prev.next = e + e.next.prev = e + e.list = l + l.len++ + return e +} + +// insertValue is a convenience wrapper for insert(&Entry{Value: v, ExpiresAt: ExpiresAt}, at). +func (l *LruList[K, V]) insertValue(k K, v V, expiresAt time.Time, at *Entry[K, V]) *Entry[K, V] { + return l.insert(&Entry[K, V]{Value: v, Key: k, ExpiresAt: expiresAt}, at) +} + +// Remove removes e from its list, decrements l.len +func (l *LruList[K, V]) Remove(e *Entry[K, V]) V { + e.prev.next = e.next + e.next.prev = e.prev + e.next = nil // avoid memory leaks + e.prev = nil // avoid memory leaks + e.list = nil + l.len-- + + return e.Value +} + +// move moves e to next to at. +func (l *LruList[K, V]) move(e, at *Entry[K, V]) { + if e == at { + return + } + e.prev.next = e.next + e.next.prev = e.prev + + e.prev = at + e.next = at.next + e.prev.next = e + e.next.prev = e +} + +// PushFront inserts a new element e with value v at the front of list l and returns e. +func (l *LruList[K, V]) PushFront(k K, v V) *Entry[K, V] { + l.lazyInit() + return l.insertValue(k, v, time.Time{}, &l.root) +} + +// PushFrontExpirable inserts a new expirable element e with Value v at the front of list l and returns e. +func (l *LruList[K, V]) PushFrontExpirable(k K, v V, expiresAt time.Time) *Entry[K, V] { + l.lazyInit() + return l.insertValue(k, v, expiresAt, &l.root) +} + +// MoveToFront moves element e to the front of list l. +// If e is not an element of l, the list is not modified. +// The element must not be nil. +func (l *LruList[K, V]) MoveToFront(e *Entry[K, V]) { + if e.list != l || l.root.next == e { + return + } + // see comment in List.Remove about initialization of l + l.move(e, &l.root) +} diff --git a/vendor/github.com/hashicorp/golang-lru/v2/lru.go b/vendor/github.com/hashicorp/golang-lru/v2/lru.go new file mode 100644 index 00000000..a2655f1f --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/lru.go @@ -0,0 +1,250 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package lru + +import ( + "sync" + + "github.com/hashicorp/golang-lru/v2/simplelru" +) + +const ( + // DefaultEvictedBufferSize defines the default buffer size to store evicted key/val + DefaultEvictedBufferSize = 16 +) + +// Cache is a thread-safe fixed size LRU cache. 
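(Editorial aside: a minimal sketch of the thread-safe wrapper defined just below, using the `NewWithEvict` constructor from this file; note the comments in this file state that eviction callbacks are invoked outside the cache's critical section.)

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

func main() {
	// The eviction callback runs outside the lock, so it may safely call
	// back into the cache if needed.
	onEvicted := func(key int, value string) {
		fmt.Printf("evicted %d=%q\n", key, value)
	}

	cache, err := lru.NewWithEvict[int, string](2, onEvicted)
	if err != nil {
		panic(err)
	}

	cache.Add(1, "one")
	cache.Add(2, "two")
	cache.Add(3, "three") // evicts key 1, the least recently used entry
	// Output: evicted 1="one"
}
```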
+type Cache[K comparable, V any] struct { + lru *simplelru.LRU[K, V] + evictedKeys []K + evictedVals []V + onEvictedCB func(k K, v V) + lock sync.RWMutex +} + +// New creates an LRU of the given size. +func New[K comparable, V any](size int) (*Cache[K, V], error) { + return NewWithEvict[K, V](size, nil) +} + +// NewWithEvict constructs a fixed size cache with the given eviction +// callback. +func NewWithEvict[K comparable, V any](size int, onEvicted func(key K, value V)) (c *Cache[K, V], err error) { + // create a cache with default settings + c = &Cache[K, V]{ + onEvictedCB: onEvicted, + } + if onEvicted != nil { + c.initEvictBuffers() + onEvicted = c.onEvicted + } + c.lru, err = simplelru.NewLRU(size, onEvicted) + return +} + +func (c *Cache[K, V]) initEvictBuffers() { + c.evictedKeys = make([]K, 0, DefaultEvictedBufferSize) + c.evictedVals = make([]V, 0, DefaultEvictedBufferSize) +} + +// onEvicted save evicted key/val and sent in externally registered callback +// outside of critical section +func (c *Cache[K, V]) onEvicted(k K, v V) { + c.evictedKeys = append(c.evictedKeys, k) + c.evictedVals = append(c.evictedVals, v) +} + +// Purge is used to completely clear the cache. +func (c *Cache[K, V]) Purge() { + var ks []K + var vs []V + c.lock.Lock() + c.lru.Purge() + if c.onEvictedCB != nil && len(c.evictedKeys) > 0 { + ks, vs = c.evictedKeys, c.evictedVals + c.initEvictBuffers() + } + c.lock.Unlock() + // invoke callback outside of critical section + if c.onEvictedCB != nil { + for i := 0; i < len(ks); i++ { + c.onEvictedCB(ks[i], vs[i]) + } + } +} + +// Add adds a value to the cache. Returns true if an eviction occurred. +func (c *Cache[K, V]) Add(key K, value V) (evicted bool) { + var k K + var v V + c.lock.Lock() + evicted = c.lru.Add(key, value) + if c.onEvictedCB != nil && evicted { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } + c.lock.Unlock() + if c.onEvictedCB != nil && evicted { + c.onEvictedCB(k, v) + } + return +} + +// Get looks up a key's value from the cache. +func (c *Cache[K, V]) Get(key K) (value V, ok bool) { + c.lock.Lock() + value, ok = c.lru.Get(key) + c.lock.Unlock() + return value, ok +} + +// Contains checks if a key is in the cache, without updating the +// recent-ness or deleting it for being stale. +func (c *Cache[K, V]) Contains(key K) bool { + c.lock.RLock() + containKey := c.lru.Contains(key) + c.lock.RUnlock() + return containKey +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *Cache[K, V]) Peek(key K) (value V, ok bool) { + c.lock.RLock() + value, ok = c.lru.Peek(key) + c.lock.RUnlock() + return value, ok +} + +// ContainsOrAdd checks if a key is in the cache without updating the +// recent-ness or deleting it for being stale, and if not, adds the value. +// Returns whether found and whether an eviction occurred. 
+func (c *Cache[K, V]) ContainsOrAdd(key K, value V) (ok, evicted bool) { + var k K + var v V + c.lock.Lock() + if c.lru.Contains(key) { + c.lock.Unlock() + return true, false + } + evicted = c.lru.Add(key, value) + if c.onEvictedCB != nil && evicted { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } + c.lock.Unlock() + if c.onEvictedCB != nil && evicted { + c.onEvictedCB(k, v) + } + return false, evicted +} + +// PeekOrAdd checks if a key is in the cache without updating the +// recent-ness or deleting it for being stale, and if not, adds the value. +// Returns whether found and whether an eviction occurred. +func (c *Cache[K, V]) PeekOrAdd(key K, value V) (previous V, ok, evicted bool) { + var k K + var v V + c.lock.Lock() + previous, ok = c.lru.Peek(key) + if ok { + c.lock.Unlock() + return previous, true, false + } + evicted = c.lru.Add(key, value) + if c.onEvictedCB != nil && evicted { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } + c.lock.Unlock() + if c.onEvictedCB != nil && evicted { + c.onEvictedCB(k, v) + } + return +} + +// Remove removes the provided key from the cache. +func (c *Cache[K, V]) Remove(key K) (present bool) { + var k K + var v V + c.lock.Lock() + present = c.lru.Remove(key) + if c.onEvictedCB != nil && present { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } + c.lock.Unlock() + if c.onEvictedCB != nil && present { + c.onEvictedCB(k, v) + } + return +} + +// Resize changes the cache size. +func (c *Cache[K, V]) Resize(size int) (evicted int) { + var ks []K + var vs []V + c.lock.Lock() + evicted = c.lru.Resize(size) + if c.onEvictedCB != nil && evicted > 0 { + ks, vs = c.evictedKeys, c.evictedVals + c.initEvictBuffers() + } + c.lock.Unlock() + if c.onEvictedCB != nil && evicted > 0 { + for i := 0; i < len(ks); i++ { + c.onEvictedCB(ks[i], vs[i]) + } + } + return evicted +} + +// RemoveOldest removes the oldest item from the cache. +func (c *Cache[K, V]) RemoveOldest() (key K, value V, ok bool) { + var k K + var v V + c.lock.Lock() + key, value, ok = c.lru.RemoveOldest() + if c.onEvictedCB != nil && ok { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } + c.lock.Unlock() + if c.onEvictedCB != nil && ok { + c.onEvictedCB(k, v) + } + return +} + +// GetOldest returns the oldest entry +func (c *Cache[K, V]) GetOldest() (key K, value V, ok bool) { + c.lock.RLock() + key, value, ok = c.lru.GetOldest() + c.lock.RUnlock() + return +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *Cache[K, V]) Keys() []K { + c.lock.RLock() + keys := c.lru.Keys() + c.lock.RUnlock() + return keys +} + +// Values returns a slice of the values in the cache, from oldest to newest. +func (c *Cache[K, V]) Values() []V { + c.lock.RLock() + values := c.lru.Values() + c.lock.RUnlock() + return values +} + +// Len returns the number of items in the cache. 
+func (c *Cache[K, V]) Len() int { + c.lock.RLock() + length := c.lru.Len() + c.lock.RUnlock() + return length +} diff --git a/vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list new file mode 100644 index 00000000..c4764e6b --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list @@ -0,0 +1,29 @@ +This license applies to simplelru/list.go + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go new file mode 100644 index 00000000..f6979238 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go @@ -0,0 +1,177 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package simplelru + +import ( + "errors" + + "github.com/hashicorp/golang-lru/v2/internal" +) + +// EvictCallback is used to get a callback when a cache entry is evicted +type EvictCallback[K comparable, V any] func(key K, value V) + +// LRU implements a non-thread safe fixed size LRU cache +type LRU[K comparable, V any] struct { + size int + evictList *internal.LruList[K, V] + items map[K]*internal.Entry[K, V] + onEvict EvictCallback[K, V] +} + +// NewLRU constructs an LRU of the given size +func NewLRU[K comparable, V any](size int, onEvict EvictCallback[K, V]) (*LRU[K, V], error) { + if size <= 0 { + return nil, errors.New("must provide a positive size") + } + + c := &LRU[K, V]{ + size: size, + evictList: internal.NewList[K, V](), + items: make(map[K]*internal.Entry[K, V]), + onEvict: onEvict, + } + return c, nil +} + +// Purge is used to completely clear the cache. +func (c *LRU[K, V]) Purge() { + for k, v := range c.items { + if c.onEvict != nil { + c.onEvict(k, v.Value) + } + delete(c.items, k) + } + c.evictList.Init() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. 
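(Editorial aside: `simplelru` is the unsynchronized core that the wrappers above delegate to. A small sketch of direct use, built from the signatures declared in this file; callers sharing an `LRU` across goroutines must supply their own locking.)

```go
package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/v2/simplelru"
)

func main() {
	// Capacity 2, no eviction callback.
	l, err := simplelru.NewLRU[string, int](2, nil)
	if err != nil {
		panic(err)
	}

	l.Add("a", 1)
	l.Add("b", 2)
	evicted := l.Add("c", 3)       // true: "a" falls off the end of the LRU list
	fmt.Println(evicted, l.Keys()) // true [b c] (oldest to newest)

	// Resize reports how many entries had to be evicted to fit the new size.
	fmt.Println(l.Resize(1)) // 1
}
```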
+func (c *LRU[K, V]) Add(key K, value V) (evicted bool) { + // Check for existing item + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + ent.Value = value + return false + } + + // Add new item + ent := c.evictList.PushFront(key, value) + c.items[key] = ent + + evict := c.evictList.Length() > c.size + // Verify size not exceeded + if evict { + c.removeOldest() + } + return evict +} + +// Get looks up a key's value from the cache. +func (c *LRU[K, V]) Get(key K) (value V, ok bool) { + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + return ent.Value, true + } + return +} + +// Contains checks if a key is in the cache, without updating the recent-ness +// or deleting it for being stale. +func (c *LRU[K, V]) Contains(key K) (ok bool) { + _, ok = c.items[key] + return ok +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *LRU[K, V]) Peek(key K) (value V, ok bool) { + var ent *internal.Entry[K, V] + if ent, ok = c.items[key]; ok { + return ent.Value, true + } + return +} + +// Remove removes the provided key from the cache, returning if the +// key was contained. +func (c *LRU[K, V]) Remove(key K) (present bool) { + if ent, ok := c.items[key]; ok { + c.removeElement(ent) + return true + } + return false +} + +// RemoveOldest removes the oldest item from the cache. +func (c *LRU[K, V]) RemoveOldest() (key K, value V, ok bool) { + if ent := c.evictList.Back(); ent != nil { + c.removeElement(ent) + return ent.Key, ent.Value, true + } + return +} + +// GetOldest returns the oldest entry +func (c *LRU[K, V]) GetOldest() (key K, value V, ok bool) { + if ent := c.evictList.Back(); ent != nil { + return ent.Key, ent.Value, true + } + return +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *LRU[K, V]) Keys() []K { + keys := make([]K, c.evictList.Length()) + i := 0 + for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() { + keys[i] = ent.Key + i++ + } + return keys +} + +// Values returns a slice of the values in the cache, from oldest to newest. +func (c *LRU[K, V]) Values() []V { + values := make([]V, len(c.items)) + i := 0 + for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() { + values[i] = ent.Value + i++ + } + return values +} + +// Len returns the number of items in the cache. +func (c *LRU[K, V]) Len() int { + return c.evictList.Length() +} + +// Resize changes the cache size. +func (c *LRU[K, V]) Resize(size int) (evicted int) { + diff := c.Len() - size + if diff < 0 { + diff = 0 + } + for i := 0; i < diff; i++ { + c.removeOldest() + } + c.size = size + return diff +} + +// removeOldest removes the oldest item from the cache. +func (c *LRU[K, V]) removeOldest() { + if ent := c.evictList.Back(); ent != nil { + c.removeElement(ent) + } +} + +// removeElement is used to remove a given list element from the cache +func (c *LRU[K, V]) removeElement(e *internal.Entry[K, V]) { + c.evictList.Remove(e) + delete(c.items, e.Key) + if c.onEvict != nil { + c.onEvict(e.Key, e.Value) + } +} diff --git a/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go new file mode 100644 index 00000000..043b8bcc --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go @@ -0,0 +1,46 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// Package simplelru provides simple LRU implementation based on build-in container/list. +package simplelru + +// LRUCache is the interface for simple LRU cache. +type LRUCache[K comparable, V any] interface { + // Adds a value to the cache, returns true if an eviction occurred and + // updates the "recently used"-ness of the key. + Add(key K, value V) bool + + // Returns key's value from the cache and + // updates the "recently used"-ness of the key. #value, isFound + Get(key K) (value V, ok bool) + + // Checks if a key exists in cache without updating the recent-ness. + Contains(key K) (ok bool) + + // Returns key's value without updating the "recently used"-ness of the key. + Peek(key K) (value V, ok bool) + + // Removes a key from the cache. + Remove(key K) bool + + // Removes the oldest entry from cache. + RemoveOldest() (K, V, bool) + + // Returns the oldest entry from the cache. #key, value, isFound + GetOldest() (K, V, bool) + + // Returns a slice of the keys in the cache, from oldest to newest. + Keys() []K + + // Values returns a slice of the values in the cache, from oldest to newest. + Values() []V + + // Returns the number of items in the cache. + Len() int + + // Clears all cache entries. + Purge() + + // Resizes cache, returning number evicted + Resize(int) int +} diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml index a2295380..4528059c 100644 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -1,5 +1,5 @@ -# This is an example goreleaser.yaml file with some sane defaults. -# Make sure to check the documentation at http://goreleaser.com +version: 2 + before: hooks: - ./gen.sh @@ -99,7 +99,7 @@ archives: checksum: name_template: 'checksums.txt' snapshot: - name_template: "{{ .Tag }}-next" + version_template: "{{ .Tag }}-next" changelog: sort: asc filters: diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 05c7359e..de264c85 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,6 +16,27 @@ This package provides various compression algorithms. # changelog +* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10) + * gzhttp: Add TransportAlwaysDecompress option. 
https://github.com/klauspost/compress/pull/978 + * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002 + * s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982 + * zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007 + * flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996 + +* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9) + * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949 + * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963 + * Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971 + * zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951 + +* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8) + * zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885 + * zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938 + +* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7) + * s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927 + * s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930 + * Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6) * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923 * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925 @@ -81,7 +102,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 - * gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 + * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 * Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 @@ -136,7 +157,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 - * zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657 + * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657 * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 * s2: Improve "better" compression. 
https://github.com/klauspost/compress/pull/635 * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 @@ -339,7 +360,7 @@ While the release has been extensively tested, it is recommended to testing when * s2: Fix binaries. * Feb 25, 2021 (v1.11.8) - * s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended. + * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended. * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) @@ -518,7 +539,7 @@ While the release has been extensively tested, it is recommended to testing when * Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. * Feb 19, 2016: Handle small payloads faster in level 1-3. * Feb 19, 2016: Added faster level 2 + 3 compression modes. -* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5. +* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5. * Feb 14, 2016: Snappy: Merge upstream changes. * Feb 14, 2016: Snappy: Fix aggressive skipping. * Feb 14, 2016: Snappy: Update benchmark. diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index 66d1657d..af53fb86 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -861,7 +861,7 @@ func (d *compressor) reset(w io.Writer) { } switch d.compressionLevel.chain { case 0: - // level was NoCompression or ConstantCompresssion. + // level was NoCompression or ConstantCompression. d.windowEnd = 0 default: s := d.state diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go index 2f410d64..0d7b437f 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -298,6 +298,14 @@ const ( huffmanGenericReader ) +// flushMode tells decompressor when to return data +type flushMode uint8 + +const ( + syncFlush flushMode = iota // return data after sync flush block + partialFlush // return data after each block +) + // Decompress state. type decompressor struct { // Input source. @@ -332,6 +340,8 @@ type decompressor struct { nb uint final bool + + flushMode flushMode } func (f *decompressor) nextBlock() { @@ -618,7 +628,10 @@ func (f *decompressor) dataBlock() { } if n == 0 { - f.toRead = f.dict.readFlush() + if f.flushMode == syncFlush { + f.toRead = f.dict.readFlush() + } + f.finishBlock() return } @@ -657,8 +670,12 @@ func (f *decompressor) finishBlock() { if f.dict.availRead() > 0 { f.toRead = f.dict.readFlush() } + f.err = io.EOF + } else if f.flushMode == partialFlush && f.dict.availRead() > 0 { + f.toRead = f.dict.readFlush() } + f.step = nextBlock } @@ -789,15 +806,25 @@ func (f *decompressor) Reset(r io.Reader, dict []byte) error { return nil } -// NewReader returns a new ReadCloser that can be used -// to read the uncompressed version of r. 
-// If r does not also implement io.ByteReader, -// the decompressor may read more data than necessary from r. -// It is the caller's responsibility to call Close on the ReadCloser -// when finished reading. -// -// The ReadCloser returned by NewReader also implements Resetter. -func NewReader(r io.Reader) io.ReadCloser { +type ReaderOpt func(*decompressor) + +// WithPartialBlock tells decompressor to return after each block, +// so it can read data written with partial flush +func WithPartialBlock() ReaderOpt { + return func(f *decompressor) { + f.flushMode = partialFlush + } +} + +// WithDict initializes the reader with a preset dictionary +func WithDict(dict []byte) ReaderOpt { + return func(f *decompressor) { + f.dict.init(maxMatchOffset, dict) + } +} + +// NewReaderOpts returns new reader with provided options +func NewReaderOpts(r io.Reader, opts ...ReaderOpt) io.ReadCloser { fixedHuffmanDecoderInit() var f decompressor @@ -806,9 +833,26 @@ func NewReader(r io.Reader) io.ReadCloser { f.codebits = new([numCodes]int) f.step = nextBlock f.dict.init(maxMatchOffset, nil) + + for _, opt := range opts { + opt(&f) + } + return &f } +// NewReader returns a new ReadCloser that can be used +// to read the uncompressed version of r. +// If r does not also implement io.ByteReader, +// the decompressor may read more data than necessary from r. +// It is the caller's responsibility to call Close on the ReadCloser +// when finished reading. +// +// The ReadCloser returned by NewReader also implements Resetter. +func NewReader(r io.Reader) io.ReadCloser { + return NewReaderOpts(r) +} + // NewReaderDict is like NewReader but initializes the reader // with a preset dictionary. The returned Reader behaves as if // the uncompressed data stream started with the given dictionary, @@ -817,13 +861,5 @@ func NewReader(r io.Reader) io.ReadCloser { // // The ReadCloser returned by NewReader also implements Resetter. func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser { - fixedHuffmanDecoderInit() - - var f decompressor - f.r = makeReader(r) - f.bits = new([maxNumLit + maxNumDist]int) - f.codebits = new([numCodes]int) - f.step = nextBlock - f.dict.init(maxMatchOffset, dict) - return &f + return NewReaderOpts(r, WithDict(dict)) } diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s index 9a7655c0..0782b86e 100644 --- a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s +++ b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s @@ -5,7 +5,6 @@ #include "textflag.h" // func matchLen(a []byte, b []byte) int -// Requires: BMI TEXT ·matchLen(SB), NOSPLIT, $0-56 MOVQ a_base+0(FP), AX MOVQ b_base+24(FP), CX @@ -17,17 +16,16 @@ TEXT ·matchLen(SB), NOSPLIT, $0-56 JB matchlen_match4_standalone matchlen_loopback_standalone: - MOVQ (AX)(SI*1), BX - XORQ (CX)(SI*1), BX - TESTQ BX, BX - JZ matchlen_loop_standalone + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + JZ matchlen_loop_standalone #ifdef GOAMD64_v3 TZCNTQ BX, BX #else BSFQ BX, BX #endif - SARQ $0x03, BX + SHRL $0x03, BX LEAL (SI)(BX*1), SI JMP gen_match_len_end diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go index cc05d0f7..0c7dd4ff 100644 --- a/vendor/github.com/klauspost/compress/fse/decompress.go +++ b/vendor/github.com/klauspost/compress/fse/decompress.go @@ -15,7 +15,7 @@ const ( // It is possible, but by no way guaranteed that corrupt data will // return an error. 
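The flate hunk above introduces an options-based constructor. A sketch of reading a partially flushed stream with it, assuming only the NewReaderOpts, WithPartialBlock, and WithDict helpers added above:

package main

import (
	"bytes"
	"io"

	"github.com/klauspost/compress/flate"
)

// readPartialFlushed decompresses data written with partial flushes, which
// the default sync-flush mode would only return after a sync marker.
func readPartialFlushed(compressed, dict []byte) ([]byte, error) {
	r := flate.NewReaderOpts(bytes.NewReader(compressed),
		flate.WithPartialBlock(),
		flate.WithDict(dict))
	defer r.Close()
	return io.ReadAll(r)
}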
// It is up to the caller to verify integrity of the returned data. -// Use a predefined Scrach to set maximum acceptable output size. +// Use a predefined Scratch to set maximum acceptable output size. func Decompress(b []byte, s *Scratch) ([]byte, error) { s, err := s.prepare(b) if err != nil { diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 54bd08b2..0f56b02d 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -1136,7 +1136,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) { errs++ } if errs > 0 { - fmt.Fprintf(w, "%d errros in base, stopping\n", errs) + fmt.Fprintf(w, "%d errors in base, stopping\n", errs) continue } // Ensure that all combinations are covered. @@ -1152,7 +1152,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) { errs++ } if errs > 20 { - fmt.Fprintf(w, "%d errros, stopping\n", errs) + fmt.Fprintf(w, "%d errors, stopping\n", errs) break } } diff --git a/vendor/github.com/klauspost/compress/internal/godebug/godebug.go b/vendor/github.com/klauspost/compress/internal/godebug/godebug.go new file mode 100644 index 00000000..ff13f2a0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/godebug/godebug.go @@ -0,0 +1,44 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package godebug makes the simplified settings in the $GODEBUG environment variable +// available to packages. +// Needed since internal/godebug is not available here. +package godebug + +import "os" + +func Get(key string) string { + s := os.Getenv("GODEBUG") + if s == "" { + return "" + } + // Scan the string backward so that later settings are used + // and earlier settings are ignored. + // Note that a forward scan would cause cached values + // to temporarily use the ignored value before being + // updated to the "correct" one. + end := len(s) + eq := -1 + for i := end - 1; i >= -1; i-- { + if i == -1 || s[i] == ',' { + if eq >= 0 { + name, arg := s[i+1:eq], s[eq+1:end] + if name == key { + for j := 0; j < len(arg); j++ { + if arg[j] == '#' { + return arg[:j] + } + } + return arg + } + } + eq = -1 + end = i + } else if s[i] == '=' { + eq = i + } + } + return "" +} diff --git a/vendor/github.com/klauspost/compress/zip/reader.go b/vendor/github.com/klauspost/compress/zip/reader.go index 460394ca..c3bcc883 100644 --- a/vendor/github.com/klauspost/compress/zip/reader.go +++ b/vendor/github.com/klauspost/compress/zip/reader.go @@ -14,16 +14,20 @@ import ( "io/fs" "os" "path" + "path/filepath" "sort" "strings" "sync" "time" + + "github.com/klauspost/compress/internal/godebug" ) var ( - ErrFormat = errors.New("zip: not a valid zip file") - ErrAlgorithm = errors.New("zip: unsupported compression algorithm") - ErrChecksum = errors.New("zip: checksum error") + ErrFormat = errors.New("zip: not a valid zip file") + ErrAlgorithm = errors.New("zip: unsupported compression algorithm") + ErrChecksum = errors.New("zip: checksum error") + ErrInsecurePath = errors.New("zip: insecure file path") ) // A Reader serves content from a ZIP archive. @@ -43,15 +47,15 @@ type Reader struct { fileList []fileListEntry } -// A ReadCloser is a Reader that must be closed when no longer needed. +// A ReadCloser is a [Reader] that must be closed when no longer needed. 
type ReadCloser struct { f *os.File Reader } // A File is a single file in a ZIP archive. -// The file information is in the embedded FileHeader. -// The file content can be accessed by calling Open. +// The file information is in the embedded [FileHeader]. +// The file content can be accessed by calling [File.Open]. type File struct { FileHeader zip *Reader @@ -61,6 +65,14 @@ type File struct { } // OpenReader will open the Zip file specified by name and return a ReadCloser. +// +// If any file inside the archive uses a non-local name +// (as defined by [filepath.IsLocal]) or a name containing backslashes +// and the GODEBUG environment variable contains `zipinsecurepath=0`, +// OpenReader returns the reader with an ErrInsecurePath error. +// A future version of Go may introduce this behavior by default. +// Programs that want to accept non-local names can ignore +// the ErrInsecurePath error and use the returned reader. func OpenReader(name string) (*ReadCloser, error) { f, err := os.Open(name) if err != nil { @@ -72,100 +84,111 @@ func OpenReader(name string) (*ReadCloser, error) { return nil, err } r := new(ReadCloser) - if err := r.init(f, fi.Size()); err != nil { + if err = r.init(f, fi.Size()); err != nil && err != ErrInsecurePath { f.Close() return nil, err } r.f = f - return r, nil + return r, err } -// NewReader returns a new Reader reading from r, which is assumed to +// NewReader returns a new [Reader] reading from r, which is assumed to // have the given size in bytes. +// +// If any file inside the archive uses a non-local name +// (as defined by [filepath.IsLocal]) or a name containing backslashes +// and the GODEBUG environment variable contains `zipinsecurepath=0`, +// NewReader returns the reader with an [ErrInsecurePath] error. +// A future version of Go may introduce this behavior by default. +// Programs that want to accept non-local names can ignore +// the [ErrInsecurePath] error and use the returned reader. func NewReader(r io.ReaderAt, size int64) (*Reader, error) { if size < 0 { return nil, errors.New("zip: size cannot be negative") } zr := new(Reader) - if err := zr.init(r, size); err != nil { + var err error + if err = zr.init(r, size); err != nil && err != ErrInsecurePath { return nil, err } - return zr, nil + return zr, err } -func (z *Reader) init(r io.ReaderAt, size int64) error { - end, baseOffset, err := readDirectoryEnd(r, size) +func (r *Reader) init(rdr io.ReaderAt, size int64) error { + end, baseOffset, err := readDirectoryEnd(rdr, size) if err != nil { return err } - z.r = r - z.baseOffset = baseOffset + r.r = rdr + r.baseOffset = baseOffset // Since the number of directory records is not validated, it is not - // safe to preallocate z.File without first checking that the specified + // safe to preallocate r.File without first checking that the specified // number of files is reasonable, since a malformed archive may // indicate it contains up to 1 << 128 - 1 files. Since each file has a // header which will be _at least_ 30 bytes we can safely preallocate // if (data size / 30) >= end.directoryRecords. 
if end.directorySize < uint64(size) && (uint64(size)-end.directorySize)/30 >= end.directoryRecords { - z.File = make([]*File, 0, end.directoryRecords) + r.File = make([]*File, 0, end.directoryRecords) } - z.Comment = end.comment - rs := io.NewSectionReader(r, 0, size) - if _, err = rs.Seek(z.baseOffset+int64(end.directoryOffset), io.SeekStart); err != nil { + r.Comment = end.comment + rs := io.NewSectionReader(rdr, 0, size) + if _, err = rs.Seek(r.baseOffset+int64(end.directoryOffset), io.SeekStart); err != nil { return err } buf := bufio.NewReader(rs) + // Get once + zipinsecurepath := godebug.Get("zipinsecurepath") == "0" + // The count of files inside a zip is truncated to fit in a uint16. // Gloss over this by reading headers until we encounter // a bad one, and then only report an ErrFormat or UnexpectedEOF if // the file count modulo 65536 is incorrect. for { - f := &File{zip: z, zipr: r} + f := &File{zip: r, zipr: rdr} err = readDirectoryHeader(f, buf) - - // For compatibility with other zip programs, - // if we have a non-zero base offset and can't read - // the first directory header, try again with a zero - // base offset. - if err == ErrFormat && z.baseOffset != 0 && len(z.File) == 0 { - z.baseOffset = 0 - if _, err = rs.Seek(int64(end.directoryOffset), io.SeekStart); err != nil { - return err - } - buf.Reset(rs) - continue - } - if err == ErrFormat || err == io.ErrUnexpectedEOF { break } if err != nil { return err } - f.headerOffset += z.baseOffset - z.File = append(z.File, f) + f.headerOffset += r.baseOffset + r.File = append(r.File, f) } - if uint16(len(z.File)) != uint16(end.directoryRecords) { // only compare 16 bits here + if uint16(len(r.File)) != uint16(end.directoryRecords) { // only compare 16 bits here // Return the readDirectoryHeader error if we read // the wrong number of directory entries. return err } + if zipinsecurepath { + for _, f := range r.File { + if f.Name == "" { + // Zip permits an empty file name field. + continue + } + // The zip specification states that names must use forward slashes, + // so consider any backslashes in the name insecure. + if !filepath.IsLocal(f.Name) || strings.Contains(f.Name, `\`) { + return ErrInsecurePath + } + } + } return nil } // RegisterDecompressor registers or overrides a custom decompressor for a // specific method ID. If a decompressor for a given method is not found, -// Reader will default to looking up the decompressor at the package level. -func (z *Reader) RegisterDecompressor(method uint16, dcomp Decompressor) { - if z.decompressors == nil { - z.decompressors = make(map[uint16]Decompressor) +// [Reader] will default to looking up the decompressor at the package level. +func (r *Reader) RegisterDecompressor(method uint16, dcomp Decompressor) { + if r.decompressors == nil { + r.decompressors = make(map[uint16]Decompressor) } - z.decompressors[method] = dcomp + r.decompressors[method] = dcomp } -func (z *Reader) decompressor(method uint16) Decompressor { - dcomp := z.decompressors[method] +func (r *Reader) decompressor(method uint16) Decompressor { + dcomp := r.decompressors[method] if dcomp == nil { dcomp = decompressor(method) } @@ -180,7 +203,7 @@ func (rc *ReadCloser) Close() error { // DataOffset returns the offset of the file's possibly-compressed // data, relative to the beginning of the zip file. // -// Most callers should instead use Open, which transparently +// Most callers should instead use [File.Open], which transparently // decompresses data and verifies checksums. 
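The OpenReader/NewReader docs above describe an opt-in flow for insecure names. A sketch of a caller that deliberately accepts them when GODEBUG contains zipinsecurepath=0 (the archive name is a placeholder):

package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/klauspost/compress/zip"
)

func main() {
	rc, err := zip.OpenReader("archive.zip")
	if errors.Is(err, zip.ErrInsecurePath) {
		// Some entry has a non-local name or contains a backslash; the
		// returned reader is still valid, so accept it explicitly.
		err = nil
	}
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	for _, f := range rc.File {
		fmt.Println(f.Name)
	}
}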
func (f *File) DataOffset() (offset int64, err error) { bodyOffset, err := f.findBodyOffset() @@ -190,13 +213,29 @@ func (f *File) DataOffset() (offset int64, err error) { return f.headerOffset + bodyOffset, nil } -// Open returns a ReadCloser that provides access to the File's contents. +// Open returns a [ReadCloser] that provides access to the [File]'s contents. // Multiple files may be read concurrently. func (f *File) Open() (io.ReadCloser, error) { bodyOffset, err := f.findBodyOffset() if err != nil { return nil, err } + if strings.HasSuffix(f.Name, "/") { + // The ZIP specification (APPNOTE.TXT) specifies that directories, which + // are technically zero-byte files, must not have any associated file + // data. We previously tried failing here if f.CompressedSize64 != 0, + // but it turns out that a number of implementations (namely, the Java + // jar tool) don't properly set the storage method on directories + // resulting in a file with compressed size > 0 but uncompressed size == + // 0. We still want to fail when a directory has associated uncompressed + // data, but we are tolerant of cases where the uncompressed size is + // zero but compressed size is not. + if f.UncompressedSize64 != 0 { + return &dirReader{ErrFormat}, nil + } else { + return &dirReader{io.EOF}, nil + } + } size := int64(f.CompressedSize64) r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset, size) dcomp := f.zip.decompressor(f.Method) @@ -217,7 +256,7 @@ func (f *File) Open() (io.ReadCloser, error) { return rc, nil } -// OpenRaw returns a Reader that provides access to the File's contents without +// OpenRaw returns a [Reader] that provides access to the [File]'s contents without // decompression. func (f *File) OpenRaw() (io.Reader, error) { bodyOffset, err := f.findBodyOffset() @@ -228,6 +267,18 @@ func (f *File) OpenRaw() (io.Reader, error) { return r, nil } +type dirReader struct { + err error +} + +func (r *dirReader) Read([]byte) (int, error) { + return 0, r.err +} + +func (r *dirReader) Close() error { + return nil +} + type checksumReader struct { rc io.ReadCloser hash hash.Hash32 @@ -419,8 +470,8 @@ parseExtras: const ticksPerSecond = 1e7 // Windows timestamp resolution ts := int64(attrBuf.uint64()) // ModTime since Windows epoch - secs := int64(ts / ticksPerSecond) - nsecs := (1e9 / ticksPerSecond) * int64(ts%ticksPerSecond) + secs := ts / ticksPerSecond + nsecs := (1e9 / ticksPerSecond) * (ts % ticksPerSecond) epoch := time.Date(1601, time.January, 1, 0, 0, 0, 0, time.UTC) modified = time.Unix(epoch.Unix()+secs, nsecs) } @@ -565,12 +616,31 @@ func readDirectoryEnd(r io.ReaderAt, size int64) (dir *directoryEnd, baseOffset } } + maxInt64 := uint64(1<<63 - 1) + if d.directorySize > maxInt64 || d.directoryOffset > maxInt64 { + return nil, 0, ErrFormat + } + baseOffset = directoryEndOffset - int64(d.directorySize) - int64(d.directoryOffset) // Make sure directoryOffset points to somewhere in our file. if o := baseOffset + int64(d.directoryOffset); o < 0 || o >= size { return nil, 0, ErrFormat } + + // If the directory end data tells us to use a non-zero baseOffset, + // but we would find a valid directory entry if we assume that the + // baseOffset is 0, then just use a baseOffset of 0. + // We've seen files in which the directory end data gives us + // an incorrect baseOffset. 
+ if baseOffset > 0 { + off := int64(d.directoryOffset) + rs := io.NewSectionReader(r, off, size-off) + if readDirectoryHeader(&File{}, rs) == nil { + baseOffset = 0 + } + } + return d, baseOffset, nil } @@ -630,9 +700,13 @@ func findSignatureInBlock(b []byte) int { if b[i] == 'P' && b[i+1] == 'K' && b[i+2] == 0x05 && b[i+3] == 0x06 { // n is length of comment n := int(b[i+directoryEndLen-2]) | int(b[i+directoryEndLen-1])<<8 - if n+directoryEndLen+i <= len(b) { - return i + if n+directoryEndLen+i > len(b) { + // Truncated comment. + // Some parsers (such as Info-ZIP) ignore the truncated comment + // rather than treating it as a hard error. + return -1 } + return i } } return -1 @@ -684,14 +758,14 @@ type fileInfoDirEntry interface { fs.DirEntry } -func (e *fileListEntry) stat() (fileInfoDirEntry, error) { - if e.isDup { - return nil, errors.New(e.name + ": duplicate entries in zip file") +func (f *fileListEntry) stat() (fileInfoDirEntry, error) { + if f.isDup { + return nil, errors.New(f.name + ": duplicate entries in zip file") } - if !e.isDir { - return headerFileInfo{&e.file.FileHeader}, nil + if !f.isDir { + return headerFileInfo{&f.file.FileHeader}, nil } - return e, nil + return f, nil } // Only used for directories. @@ -700,7 +774,7 @@ func (f *fileListEntry) Size() int64 { return 0 } func (f *fileListEntry) Mode() fs.FileMode { return fs.ModeDir | 0555 } func (f *fileListEntry) Type() fs.FileMode { return fs.ModeDir } func (f *fileListEntry) IsDir() bool { return true } -func (f *fileListEntry) Sys() interface{} { return nil } +func (f *fileListEntry) Sys() any { return nil } func (f *fileListEntry) ModTime() time.Time { if f.file == nil { @@ -711,12 +785,92 @@ func (f *fileListEntry) ModTime() time.Time { func (f *fileListEntry) Info() (fs.FileInfo, error) { return f, nil } +func (f *fileListEntry) String() string { + return formatDirEntry(f) +} + +// formatDirEntry returns a formatted version of dir for human readability. +// Implementations of [DirEntry] can call this from a String method. +// The outputs for a directory named subdir and a file named hello.go are: +// +// d subdir/ +// - hello.go +// +// TODO: Use fs.FormatDirEntry when Go 1.20 is no longer supported +func formatDirEntry(dir fs.DirEntry) string { + name := dir.Name() + b := make([]byte, 0, 5+len(name)) + + // The Type method does not return any permission bits, + // so strip them from the string. + mode := dir.Type().String() + mode = mode[:len(mode)-9] + + b = append(b, mode...) + b = append(b, ' ') + b = append(b, name...) + if dir.IsDir() { + b = append(b, '/') + } + return string(b) +} + +// formatFileInfo returns a formatted version of info for human readability. +// Implementations of [FileInfo] can call this from a String method. +// The output for a file named "hello.go", 100 bytes, mode 0o644, created +// January 1, 1970 at noon is +// +// -rw-r--r-- 100 1970-01-01 12:00:00 hello.go +// +// TODO: Use fs.FormatFileInfo when Go 1.20 is no longer supported +func formatFileInfo(info fs.FileInfo) string { + name := info.Name() + b := make([]byte, 0, 40+len(name)) + b = append(b, info.Mode().String()...) + b = append(b, ' ') + + size := info.Size() + var usize uint64 + if size >= 0 { + usize = uint64(size) + } else { + b = append(b, '-') + usize = uint64(-size) + } + var buf [20]byte + i := len(buf) - 1 + for usize >= 10 { + q := usize / 10 + buf[i] = byte('0' + usize - q*10) + i-- + usize = q + } + buf[i] = byte('0' + usize) + b = append(b, buf[i:]...) 
+ b = append(b, ' ') + + b = append(b, info.ModTime().Format(time.DateTime)...) + b = append(b, ' ') + + b = append(b, name...) + if info.IsDir() { + b = append(b, '/') + } + + return string(b) +} + // toValidName coerces name to be a valid name for fs.FS.Open. func toValidName(name string) string { name = strings.ReplaceAll(name, `\`, `/`) p := path.Clean(name) + p = strings.TrimPrefix(p, "/") - p = strings.TrimPrefix(p, "../") + + for strings.HasPrefix(p, "../") { + p = p[len("../"):] + } + return p } diff --git a/vendor/github.com/klauspost/compress/zip/register.go b/vendor/github.com/klauspost/compress/zip/register.go index ca8c13ce..8ea88893 100644 --- a/vendor/github.com/klauspost/compress/zip/register.go +++ b/vendor/github.com/klauspost/compress/zip/register.go @@ -20,7 +20,7 @@ import ( type Compressor func(w io.Writer) (io.WriteCloser, error) // A Decompressor returns a new decompressing reader, reading from r. -// The ReadCloser's Close method must be used to release associated resources. +// The [io.ReadCloser]'s Close method must be used to release associated resources. // The Decompressor itself must be safe to invoke from multiple goroutines // simultaneously, but each returned reader will be used only by // one goroutine at a time. @@ -116,7 +116,7 @@ func init() { } // RegisterDecompressor allows custom decompressors for a specified method ID. -// The common methods Store and Deflate are built in. +// The common methods [Store] and [Deflate] are built in. func RegisterDecompressor(method uint16, dcomp Decompressor) { if _, dup := decompressors.LoadOrStore(method, dcomp); dup { panic("decompressor already registered") @@ -124,7 +124,7 @@ func RegisterDecompressor(method uint16, dcomp Decompressor) { } // RegisterCompressor registers custom compressors for a specified method ID. -// The common methods Store and Deflate are built in. +// The common methods [Store] and [Deflate] are built in. func RegisterCompressor(method uint16, comp Compressor) { if _, dup := compressors.LoadOrStore(method, comp); dup { panic("compressor already registered") diff --git a/vendor/github.com/klauspost/compress/zip/struct.go b/vendor/github.com/klauspost/compress/zip/struct.go index 88effedc..2637e9c2 100644 --- a/vendor/github.com/klauspost/compress/zip/struct.go +++ b/vendor/github.com/klauspost/compress/zip/struct.go @@ -5,7 +5,7 @@ /* Package zip provides support for reading and writing ZIP archives. -See: https://www.pkware.com/appnote +See the [ZIP specification] for details. This package does not support disk spanning. @@ -16,6 +16,8 @@ fields. The 64 bit fields will always contain the correct value and for normal archives both fields will be the same. For files requiring the ZIP64 format the 32 bit fields will be 0xffffffff and the 64 bit fields must be used instead. + +[ZIP specification]: https://support.pkware.com/pkzip/appnote */ package zip @@ -65,7 +67,7 @@ const ( // // IDs 0..31 are reserved for official use by PKWARE. // IDs above that range are defined by third-party vendors. - // Since ZIP lacked high precision timestamps (nor a official specification + // Since ZIP lacked high precision timestamps (nor an official specification // of the timezone used for the date fields), many competing extra fields // have been invented. Pervasive use effectively makes them "official". // @@ -77,21 +79,16 @@ const ( infoZipUnixExtraID = 0x5855 // Info-ZIP Unix extension ) -// FileHeader describes a file within a zip file. -// See the zip spec for details. 
+// FileHeader describes a file within a ZIP file. +// See the [ZIP specification] for details. +// +// [ZIP specification]: https://support.pkware.com/pkzip/appnote type FileHeader struct { // Name is the name of the file. // // It must be a relative path, not start with a drive letter (such as "C:"), // and must use forward slashes instead of back slashes. A trailing slash // indicates that this file is a directory and should have no data. - // - // When reading zip files, the Name field is populated from - // the zip file directly and is not validated for correctness. - // It is the caller's responsibility to sanitize it as - // appropriate, including canonicalizing slash directions, - // validating that paths are relative, and preventing path - // traversal through filenames ("../../../"). Name string // Comment is any arbitrary user-defined string shorter than 64KiB. @@ -124,25 +121,51 @@ type FileHeader struct { // When writing, an extended timestamp (which is timezone-agnostic) is // always emitted. The legacy MS-DOS date field is encoded according to the // location of the Modified time. - Modified time.Time - ModifiedTime uint16 // Deprecated: Legacy MS-DOS date; use Modified instead. - ModifiedDate uint16 // Deprecated: Legacy MS-DOS time; use Modified instead. - - CRC32 uint32 - CompressedSize uint32 // Deprecated: Use CompressedSize64 instead. - UncompressedSize uint32 // Deprecated: Use UncompressedSize64 instead. - CompressedSize64 uint64 + Modified time.Time + + // ModifiedTime is an MS-DOS-encoded time. + // + // Deprecated: Use Modified instead. + ModifiedTime uint16 + + // ModifiedDate is an MS-DOS-encoded date. + // + // Deprecated: Use Modified instead. + ModifiedDate uint16 + + // CRC32 is the CRC32 checksum of the file content. + CRC32 uint32 + + // CompressedSize is the compressed size of the file in bytes. + // If either the uncompressed or compressed size of the file + // does not fit in 32 bits, CompressedSize is set to ^uint32(0). + // + // Deprecated: Use CompressedSize64 instead. + CompressedSize uint32 + + // UncompressedSize is the compressed size of the file in bytes. + // If either the uncompressed or compressed size of the file + // does not fit in 32 bits, CompressedSize is set to ^uint32(0). + // + // Deprecated: Use UncompressedSize64 instead. + UncompressedSize uint32 + + // CompressedSize64 is the compressed size of the file in bytes. + CompressedSize64 uint64 + + // UncompressedSize64 is the uncompressed size of the file in bytes. UncompressedSize64 uint64 - Extra []byte - ExternalAttrs uint32 // Meaning depends on CreatorVersion + + Extra []byte + ExternalAttrs uint32 // Meaning depends on CreatorVersion } -// FileInfo returns an fs.FileInfo for the FileHeader. +// FileInfo returns an fs.FileInfo for the [FileHeader]. func (h *FileHeader) FileInfo() fs.FileInfo { return headerFileInfo{h} } -// headerFileInfo implements fs.FileInfo. +// headerFileInfo implements [fs.FileInfo]. 
type headerFileInfo struct { fh *FileHeader } @@ -163,11 +186,15 @@ func (fi headerFileInfo) ModTime() time.Time { } func (fi headerFileInfo) Mode() fs.FileMode { return fi.fh.Mode() } func (fi headerFileInfo) Type() fs.FileMode { return fi.fh.Mode().Type() } -func (fi headerFileInfo) Sys() interface{} { return fi.fh } +func (fi headerFileInfo) Sys() any { return fi.fh } func (fi headerFileInfo) Info() (fs.FileInfo, error) { return fi, nil } -// FileInfoHeader creates a partially-populated FileHeader from an +func (fi headerFileInfo) String() string { + return formatFileInfo(fi) +} + +// FileInfoHeader creates a partially-populated [FileHeader] from an // fs.FileInfo. // Because fs.FileInfo's Name method returns only the base name of // the file it describes, it may be necessary to modify the Name field @@ -218,7 +245,7 @@ func timeZone(offset time.Duration) *time.Location { // msDosTimeToTime converts an MS-DOS date and time into a time.Time. // The resolution is 2s. -// See: https://msdn.microsoft.com/en-us/library/ms724247(v=VS.85).aspx +// See: https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-dosdatetimetofiletime func msDosTimeToTime(dosDate, dosTime uint16) time.Time { return time.Date( // date bits 0-4: day of month; 5-8: month; 9-15: years since 1980 @@ -238,7 +265,7 @@ func msDosTimeToTime(dosDate, dosTime uint16) time.Time { // timeToMsDosTime converts a time.Time to an MS-DOS date and time. // The resolution is 2s. -// See: https://msdn.microsoft.com/en-us/library/ms724274(v=VS.85).aspx +// See: https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-filetimetodosdatetime func timeToMsDosTime(t time.Time) (fDate uint16, fTime uint16) { fDate = uint16(t.Day() + int(t.Month())<<5 + (t.Year()-1980)<<9) fTime = uint16(t.Second()/2 + t.Minute()<<5 + t.Hour()<<11) @@ -246,17 +273,17 @@ func timeToMsDosTime(t time.Time) (fDate uint16, fTime uint16) { } // ModTime returns the modification time in UTC using the legacy -// ModifiedDate and ModifiedTime fields. +// [ModifiedDate] and [ModifiedTime] fields. // -// Deprecated: Use Modified instead. +// Deprecated: Use [Modified] instead. func (h *FileHeader) ModTime() time.Time { return msDosTimeToTime(h.ModifiedDate, h.ModifiedTime) } -// SetModTime sets the Modified, ModifiedTime, and ModifiedDate fields +// SetModTime sets the [Modified], [ModifiedTime], and [ModifiedDate] fields // to the given time in UTC. // -// Deprecated: Use Modified instead. +// Deprecated: Use [Modified] instead. func (h *FileHeader) SetModTime(t time.Time) { t = t.UTC() // Convert to UTC for compatibility h.Modified = t @@ -282,7 +309,7 @@ const ( msdosReadOnly = 0x01 ) -// Mode returns the permission and mode bits for the FileHeader. +// Mode returns the permission and mode bits for the [FileHeader]. func (h *FileHeader) Mode() (mode fs.FileMode) { switch h.CreatorVersion >> 8 { case creatorUnix, creatorMacOSX: @@ -296,7 +323,7 @@ func (h *FileHeader) Mode() (mode fs.FileMode) { return mode } -// SetMode changes the permission and mode bits for the FileHeader. +// SetMode changes the permission and mode bits for the [FileHeader]. 
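A short sketch of the FileHeader helpers documented above: SetMode (defined further below) encodes Unix permission bits into ExternalAttrs, and Mode/FileInfo recover them:

package main

import (
	"fmt"
	"time"

	"github.com/klauspost/compress/zip"
)

func main() {
	hdr := &zip.FileHeader{
		Name:     "dir/hello.txt",
		Method:   zip.Deflate,
		Modified: time.Now(),
	}
	hdr.SetMode(0o644) // marks CreatorVersion as Unix and sets ExternalAttrs

	fmt.Println(hdr.Mode())             // -rw-r--r--
	fmt.Println(hdr.FileInfo().IsDir()) // false
}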
func (h *FileHeader) SetMode(mode fs.FileMode) { h.CreatorVersion = h.CreatorVersion&0xff | creatorUnix<<8 h.ExternalAttrs = fileModeToUnixMode(mode) << 16 @@ -315,8 +342,8 @@ func (h *FileHeader) isZip64() bool { return h.CompressedSize64 >= uint32max || h.UncompressedSize64 >= uint32max } -func (f *FileHeader) hasDataDescriptor() bool { - return f.Flags&0x8 != 0 +func (h *FileHeader) hasDataDescriptor() bool { + return h.Flags&0x8 != 0 } func msdosModeToFileMode(m uint32) (mode fs.FileMode) { diff --git a/vendor/github.com/klauspost/compress/zip/writer.go b/vendor/github.com/klauspost/compress/zip/writer.go index d3ef0eb8..b85bb91d 100644 --- a/vendor/github.com/klauspost/compress/zip/writer.go +++ b/vendor/github.com/klauspost/compress/zip/writer.go @@ -11,6 +11,7 @@ import ( "hash" "hash/crc32" "io" + "io/fs" "strings" "unicode/utf8" ) @@ -40,7 +41,7 @@ type header struct { raw bool } -// NewWriter returns a new Writer writing a zip file to w. +// NewWriter returns a new [Writer] writing a zip file to w. func NewWriter(w io.Writer) *Writer { return &Writer{cw: &countWriter{w: bufio.NewWriter(w)}} } @@ -63,7 +64,7 @@ func (w *Writer) Flush() error { } // SetComment sets the end-of-central-directory comment field. -// It can only be called before Close. +// It can only be called before [Writer.Close]. func (w *Writer) SetComment(comment string) error { if len(comment) > uint16max { return errors.New("zip: Writer.Comment too long") @@ -125,7 +126,7 @@ func (w *Writer) Close() error { b.uint16(uint16(len(h.Comment))) b = b[4:] // skip disk number start and internal file attr (2x uint16) b.uint32(h.ExternalAttrs) - if h.isZip64() || h.offset > uint32max { + if h.offset > uint32max { b.uint32(uint32max) } else { b.uint32(uint32(h.offset)) @@ -207,14 +208,14 @@ func (w *Writer) Close() error { } // Create adds a file to the zip file using the provided name. -// It returns a Writer to which the file contents should be written. -// The file contents will be compressed using the Deflate method. +// It returns a [Writer] to which the file contents should be written. +// The file contents will be compressed using the [Deflate] method. // The name must be a relative path: it must not start with a drive // letter (e.g. C:) or leading slash, and only forward slashes are // allowed. To create a directory instead of a file, add a trailing // slash to the name. -// The file's contents must be written to the io.Writer before the next -// call to Create, CreateHeader, or Close. +// The file's contents must be written to the [io.Writer] before the next +// call to [Writer.Create], [Writer.CreateHeader], or [Writer.Close]. func (w *Writer) Create(name string) (io.Writer, error) { header := &FileHeader{ Name: name, @@ -261,13 +262,13 @@ func (w *Writer) prepare(fh *FileHeader) error { return nil } -// CreateHeader adds a file to the zip archive using the provided FileHeader -// for the file metadata. Writer takes ownership of fh and may mutate -// its fields. The caller must not modify fh after calling CreateHeader. +// CreateHeader adds a file to the zip archive using the provided [FileHeader] +// for the file metadata. [Writer] takes ownership of fh and may mutate +// its fields. The caller must not modify fh after calling [Writer.CreateHeader]. // -// This returns a Writer to which the file contents should be written. +// This returns a [Writer] to which the file contents should be written. 
// The file's contents must be written to the io.Writer before the next -// call to Create, CreateHeader, CreateRaw, or Close. +// call to [Writer.Create], [Writer.CreateHeader], [Writer.CreateRaw], or [Writer.Close]. func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) { if err := w.prepare(fh); err != nil { return nil, err @@ -433,18 +434,12 @@ func min64(x, y uint64) uint64 { return y } -// CreateHeaderRaw is replaced by CreateRaw. -// Deprecated: CreateHeaderRaw is replaced by CreateRaw (stdlib name). -func (w *Writer) CreateHeaderRaw(fh *FileHeader) (io.Writer, error) { - return w.CreateRaw(fh) -} - -// CreateRaw adds a file to the zip archive using the provided FileHeader and -// returns a Writer to which the file contents should be written. The file's -// contents must be written to the io.Writer before the next call to Create, -// CreateHeader, CreateRaw, or Close. +// CreateRaw adds a file to the zip archive using the provided [FileHeader] and +// returns a [Writer] to which the file contents should be written. The file's +// contents must be written to the io.Writer before the next call to [Writer.Create], +// [Writer.CreateHeader], [Writer.CreateRaw], or [Writer.Close]. // -// In contrast to CreateHeader, the bytes passed to Writer are not compressed. +// In contrast to [Writer.CreateHeader], the bytes passed to Writer are not compressed. func (w *Writer) CreateRaw(fh *FileHeader) (io.Writer, error) { if err := w.prepare(fh); err != nil { return nil, err @@ -476,9 +471,8 @@ func (w *Writer) CreateRaw(fh *FileHeader) (io.Writer, error) { return fw, nil } -// Copy copies the file f (obtained from a Reader) into w. It copies the raw +// Copy copies the file f (obtained from a [Reader]) into w. It copies the raw // form directly bypassing decompression, compression, and validation. -// CHANGE: Optional file name cannot be specified any more due to stdlib api. func (w *Writer) Copy(f *File) error { r, err := f.OpenRaw() if err != nil { @@ -493,7 +487,7 @@ func (w *Writer) Copy(f *File) error { } // RegisterCompressor registers or overrides a custom compressor for a specific -// method ID. If a compressor for a given method is not found, Writer will +// method ID. If a compressor for a given method is not found, [Writer] will // default to looking up the compressor at the package level. func (w *Writer) RegisterCompressor(method uint16, comp Compressor) { if w.compressors == nil { @@ -502,6 +496,44 @@ func (w *Writer) RegisterCompressor(method uint16, comp Compressor) { w.compressors[method] = comp } +// AddFS adds the files from fs.FS to the archive. +// It walks the directory tree starting at the root of the filesystem +// adding each file to the zip using deflate while maintaining the directory structure. 
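A usage sketch of the new AddFS helper, whose implementation follows just below; it deflates every regular file under an fs.FS while preserving paths (the directory and archive names are placeholders):

package main

import (
	"log"
	"os"

	"github.com/klauspost/compress/zip"
)

func main() {
	out, err := os.Create("tree.zip")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	zw := zip.NewWriter(out)
	// AddFS walks os.DirFS("./site"), adding each regular file with Deflate
	// and keeping the directory structure in the entry names.
	if err := zw.AddFS(os.DirFS("./site")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
}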
+func (w *Writer) AddFS(fsys fs.FS) error { + return fs.WalkDir(fsys, ".", func(name string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if d.IsDir() { + return nil + } + info, err := d.Info() + if err != nil { + return err + } + if !info.Mode().IsRegular() { + return errors.New("zip: cannot add non-regular file") + } + h, err := FileInfoHeader(info) + if err != nil { + return err + } + h.Name = name + h.Method = Deflate + fw, err := w.CreateHeader(h) + if err != nil { + return err + } + f, err := fsys.Open(name) + if err != nil { + return err + } + defer f.Close() + _, err = io.Copy(fw, f) + return err + }) +} + func (w *Writer) compressor(method uint16) Compressor { comp := w.compressors[method] if comp == nil { diff --git a/vendor/github.com/klauspost/compress/zlib/reader.go b/vendor/github.com/klauspost/compress/zlib/reader.go index f127d477..cb652b90 100644 --- a/vendor/github.com/klauspost/compress/zlib/reader.go +++ b/vendor/github.com/klauspost/compress/zlib/reader.go @@ -26,6 +26,7 @@ package zlib import ( "bufio" "compress/zlib" + "encoding/binary" "hash" "hash/adler32" "io" @@ -33,7 +34,10 @@ import ( "github.com/klauspost/compress/flate" ) -const zlibDeflate = 8 +const ( + zlibDeflate = 8 + zlibMaxWindow = 7 +) var ( // ErrChecksum is returned when reading ZLIB data that has an invalid checksum. @@ -52,7 +56,7 @@ type reader struct { scratch [4]byte } -// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to +// Resetter resets a ReadCloser returned by [NewReader] or [NewReaderDict] // to switch to a new underlying Reader. This permits reusing a ReadCloser // instead of allocating a new one. type Resetter interface { @@ -63,20 +67,20 @@ type Resetter interface { // NewReader creates a new ReadCloser. // Reads from the returned ReadCloser read and decompress data from r. -// If r does not implement io.ByteReader, the decompressor may read more +// If r does not implement [io.ByteReader], the decompressor may read more // data than necessary from r. // It is the caller's responsibility to call Close on the ReadCloser when done. // -// The ReadCloser returned by NewReader also implements Resetter. +// The [io.ReadCloser] returned by NewReader also implements [Resetter]. func NewReader(r io.Reader) (io.ReadCloser, error) { return NewReaderDict(r, nil) } -// NewReaderDict is like NewReader but uses a preset dictionary. +// NewReaderDict is like [NewReader] but uses a preset dictionary. // NewReaderDict ignores the dictionary if the compressed data does not refer to it. -// If the compressed data refers to a different dictionary, NewReaderDict returns ErrDictionary. +// If the compressed data refers to a different dictionary, NewReaderDict returns [ErrDictionary]. // -// The ReadCloser returned by NewReaderDict also implements Resetter. +// The ReadCloser returned by NewReaderDict also implements [Resetter]. func NewReaderDict(r io.Reader, dict []byte) (io.ReadCloser, error) { z := new(reader) err := z.Reset(r, dict) @@ -108,7 +112,7 @@ func (z *reader) Read(p []byte) (int, error) { return n, z.err } // ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952). 
- checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3]) + checksum := binary.BigEndian.Uint32(z.scratch[:4]) if checksum != z.digest.Sum32() { z.err = ErrChecksum return n, z.err @@ -116,9 +120,9 @@ func (z *reader) Read(p []byte) (int, error) { return n, io.EOF } -// Calling Close does not close the wrapped io.Reader originally passed to NewReader. +// Calling Close does not close the wrapped [io.Reader] originally passed to [NewReader]. // In order for the ZLIB checksum to be verified, the reader must be -// fully consumed until the io.EOF. +// fully consumed until the [io.EOF]. func (z *reader) Close() error { if z.err != nil && z.err != io.EOF { return z.err @@ -128,7 +132,7 @@ func (z *reader) Close() error { } func (z *reader) Reset(r io.Reader, dict []byte) error { - *z = reader{decompressor: z.decompressor, digest: z.digest} + *z = reader{decompressor: z.decompressor} if fr, ok := r.(flate.Reader); ok { z.r = fr } else { @@ -143,8 +147,8 @@ func (z *reader) Reset(r io.Reader, dict []byte) error { } return z.err } - h := uint(z.scratch[0])<<8 | uint(z.scratch[1]) - if (z.scratch[0]&0x0f != zlibDeflate) || (h%31 != 0) { + h := binary.BigEndian.Uint16(z.scratch[:2]) + if (z.scratch[0]&0x0f != zlibDeflate) || (z.scratch[0]>>4 > zlibMaxWindow) || (h%31 != 0) { z.err = ErrHeader return z.err } @@ -157,7 +161,7 @@ func (z *reader) Reset(r io.Reader, dict []byte) error { } return z.err } - checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3]) + checksum := binary.BigEndian.Uint32(z.scratch[:4]) if checksum != adler32.Checksum(dict) { z.err = ErrDictionary return z.err diff --git a/vendor/github.com/klauspost/compress/zlib/writer.go b/vendor/github.com/klauspost/compress/zlib/writer.go index 605816ba..cab9ef3e 100644 --- a/vendor/github.com/klauspost/compress/zlib/writer.go +++ b/vendor/github.com/klauspost/compress/zlib/writer.go @@ -5,6 +5,7 @@ package zlib import ( + "encoding/binary" "fmt" "hash" "hash/adler32" @@ -20,7 +21,7 @@ const ( BestSpeed = flate.BestSpeed BestCompression = flate.BestCompression DefaultCompression = flate.DefaultCompression - ConstantCompression = flate.ConstantCompression + ConstantCompression = flate.ConstantCompression // Deprecated: Use HuffmanOnly. HuffmanOnly = flate.HuffmanOnly ) @@ -40,7 +41,7 @@ type Writer struct { // NewWriter creates a new Writer. // Writes to the returned Writer are compressed and written to w. // -// It is the caller's responsibility to call Close on the WriteCloser when done. +// It is the caller's responsibility to call Close on the Writer when done. // Writes may be buffered and not flushed until Close. func NewWriter(w io.Writer) *Writer { z, _ := NewWriterLevelDict(w, DefaultCompression, nil) @@ -116,17 +117,13 @@ func (z *Writer) writeHeader() (err error) { if z.dict != nil { z.scratch[1] |= 1 << 5 } - z.scratch[1] += uint8(31 - (uint16(z.scratch[0])<<8+uint16(z.scratch[1]))%31) + z.scratch[1] += uint8(31 - binary.BigEndian.Uint16(z.scratch[:2])%31) if _, err = z.w.Write(z.scratch[0:2]); err != nil { return err } if z.dict != nil { // The next four bytes are the Adler-32 checksum of the dictionary. 
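These zlib hunks replace hand-rolled big-endian stores with encoding/binary helpers (the dictionary-checksum case follows just below). A minimal check that the two forms are byte-for-byte equivalent:

package main

import (
	"encoding/binary"
	"fmt"
	"hash/adler32"
)

func main() {
	sum := adler32.Checksum([]byte("preset dictionary"))

	var manual [4]byte
	manual[0] = uint8(sum >> 24)
	manual[1] = uint8(sum >> 16)
	manual[2] = uint8(sum >> 8)
	manual[3] = uint8(sum >> 0)

	var helper [4]byte
	binary.BigEndian.PutUint32(helper[:], sum) // ZLIB (RFC 1950) is big-endian

	fmt.Println(manual == helper) // true
}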
- checksum := adler32.Checksum(z.dict) - z.scratch[0] = uint8(checksum >> 24) - z.scratch[1] = uint8(checksum >> 16) - z.scratch[2] = uint8(checksum >> 8) - z.scratch[3] = uint8(checksum >> 0) + binary.BigEndian.PutUint32(z.scratch[:], adler32.Checksum(z.dict)) if _, err = z.w.Write(z.scratch[0:4]); err != nil { return err } @@ -192,10 +189,7 @@ func (z *Writer) Close() error { } checksum := z.digest.Sum32() // ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952). - z.scratch[0] = uint8(checksum >> 24) - z.scratch[1] = uint8(checksum >> 16) - z.scratch[2] = uint8(checksum >> 8) - z.scratch[3] = uint8(checksum >> 0) + binary.BigEndian.PutUint32(z.scratch[:], checksum) _, z.err = z.w.Write(z.scratch[0:4]) return z.err } diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 03744fbc..9c28840c 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -598,7 +598,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { printf("RLE set to 0x%x, code: %v", symb, v) } case compModeFSE: - println("Reading table for", tableIndex(i)) + if debugDecoder { + println("Reading table for", tableIndex(i)) + } if seq.fse == nil || seq.fse.preDefined { seq.fse = fseDecoderPool.Get().(*fseDecoder) } diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index 8d5567fe..b7b83164 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -273,6 +273,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { enc.Encode(&block, b) addValues(&remain, block.literals) litTotal += len(block.literals) + if len(block.sequences) == 0 { + continue + } seqs += len(block.sequences) block.genCodes() addHist(&ll, block.coders.llEnc.Histogram()) @@ -286,6 +289,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { if offset == 0 { continue } + if int(offset) >= len(o.History) { + continue + } if offset > 3 { newOffsets[offset-3]++ } else { @@ -336,6 +342,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { if seqs/nUsed < 512 { // Use 512 as minimum. nUsed = seqs / 512 + if nUsed == 0 { + nUsed = 1 + } } copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) { hist := dst.Histogram() @@ -358,6 +367,28 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { fakeLength += v hist[i] = uint32(v) } + + // Ensure we aren't trying to represent RLE. + if maxCount == fakeLength { + for i := range hist { + if uint8(i) == maxSym { + fakeLength++ + maxSym++ + hist[i+1] = 1 + if maxSym > 1 { + break + } + } + if hist[0] == 0 { + fakeLength++ + hist[i] = 1 + if maxSym > 1 { + break + } + } + } + } + dst.HistogramFinished(maxSym, maxCount) dst.reUsed = false dst.useRLE = false diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index a4f5bf91..84a79fde 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -179,9 +179,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. 
var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -210,12 +210,12 @@ encodeLoop: // Index match start+1 (long) -> s - 1 index0 := s + repOff - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -241,9 +241,9 @@ encodeLoop: if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { // Consider history as well. var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -270,11 +270,11 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff2 + s += length + repOff2 nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -708,9 +708,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -738,12 +738,12 @@ encodeLoop: blk.sequences = append(blk.sequences, seq) // Index match start+1 (long) -> s - 1 - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -772,9 +772,9 @@ encodeLoop: if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { // Consider history as well. var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -801,11 +801,11 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff2 + s += length + repOff2 nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index a154c18f..d36be7bd 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -138,9 +138,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. 
@@ -166,11 +166,11 @@ encodeLoop: println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -798,9 +798,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -826,11 +826,11 @@ encodeLoop: println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index 72af7ef0..8f8223cd 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -6,6 +6,7 @@ package zstd import ( "crypto/rand" + "errors" "fmt" "io" "math" @@ -149,6 +150,9 @@ func (e *Encoder) ResetContentSize(w io.Writer, size int64) { // and write CRC if requested. func (e *Encoder) Write(p []byte) (n int, err error) { s := &e.state + if s.eofWritten { + return 0, ErrEncoderClosed + } for len(p) > 0 { if len(p)+len(s.filling) < e.o.blockSize { if e.o.crc { @@ -202,7 +206,7 @@ func (e *Encoder) nextBlock(final bool) error { return nil } if final && len(s.filling) > 0 { - s.current = e.EncodeAll(s.filling, s.current[:0]) + s.current = e.encodeAll(s.encoder, s.filling, s.current[:0]) var n2 int n2, s.err = s.w.Write(s.current) if s.err != nil { @@ -288,6 +292,9 @@ func (e *Encoder) nextBlock(final bool) error { s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current s.nInput += int64(len(s.current)) s.wg.Add(1) + if final { + s.eofWritten = true + } go func(src []byte) { if debugEncoder { println("Adding block,", len(src), "bytes, final:", final) @@ -303,9 +310,6 @@ func (e *Encoder) nextBlock(final bool) error { blk := enc.Block() enc.Encode(blk, src) blk.last = final - if final { - s.eofWritten = true - } // Wait for pending writes. s.wWg.Wait() if s.writeErr != nil { @@ -401,12 +405,20 @@ func (e *Encoder) Flush() error { if len(s.filling) > 0 { err := e.nextBlock(false) if err != nil { + // Ignore Flush after Close. + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } return err } } s.wg.Wait() s.wWg.Wait() if s.err != nil { + // Ignore Flush after Close. + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } return s.err } return s.writeErr @@ -422,6 +434,9 @@ func (e *Encoder) Close() error { } err := e.nextBlock(true) if err != nil { + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } return err } if s.frameContentSize > 0 { @@ -459,6 +474,11 @@ func (e *Encoder) Close() error { } _, s.err = s.w.Write(frame) } + if s.err == nil { + s.err = ErrEncoderClosed + return nil + } + return s.err } @@ -469,6 +489,15 @@ func (e *Encoder) Close() error { // Data compressed with EncodeAll can be decoded with the Decoder, // using either a stream or DecodeAll. 
func (e *Encoder) EncodeAll(src, dst []byte) []byte { + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + e.encoders <- enc + }() + return e.encodeAll(enc, src, dst) +} + +func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte { if len(src) == 0 { if e.o.fullZero { // Add frame header. @@ -491,13 +520,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } return dst } - e.init.Do(e.initialize) - enc := <-e.encoders - defer func() { - // Release encoder reference to last block. - // If a non-single block is needed the encoder will reset again. - e.encoders <- enc - }() + // Use single segments when above minimum window and below window size. single := len(src) <= e.o.windowSize && len(src) > MinWindowSize if e.o.single != nil { diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 53e160f7..e47af66e 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -146,7 +146,9 @@ func (d *frameDec) reset(br byteBuffer) error { } return err } - printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + if debugDecoder { + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + } windowLog := 10 + (wd >> 3) windowBase := uint64(1) << windowLog windowAdd := (windowBase / 8) * uint64(wd&0x7) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s index 17901e08..ae7d4d32 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -162,12 +162,12 @@ finalize: MOVD h, ret+24(FP) RET -// func writeBlocks(d *Digest, b []byte) int +// func writeBlocks(s *Digest, b []byte) int TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 LDP ·primes+0(SB), (prime1, prime2) // Load state. Assume v[1-4] are stored contiguously. 
- MOVD d+0(FP), digest + MOVD s+0(FP), digest LDP 0(digest), (v1, v2) LDP 16(digest), (v3, v4) diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s index 9a7655c0..0782b86e 100644 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s @@ -5,7 +5,6 @@ #include "textflag.h" // func matchLen(a []byte, b []byte) int -// Requires: BMI TEXT ·matchLen(SB), NOSPLIT, $0-56 MOVQ a_base+0(FP), AX MOVQ b_base+24(FP), CX @@ -17,17 +16,16 @@ TEXT ·matchLen(SB), NOSPLIT, $0-56 JB matchlen_match4_standalone matchlen_loopback_standalone: - MOVQ (AX)(SI*1), BX - XORQ (CX)(SI*1), BX - TESTQ BX, BX - JZ matchlen_loop_standalone + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + JZ matchlen_loop_standalone #ifdef GOAMD64_v3 TZCNTQ BX, BX #else BSFQ BX, BX #endif - SARQ $0x03, BX + SHRL $0x03, BX LEAL (SI)(BX*1), SI JMP gen_match_len_end diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go index 8adabd82..c59f17e0 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -146,7 +146,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) default: - return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode) + return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode) } s.seqSize += ctx.litRemain @@ -292,7 +292,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { return io.ErrUnexpectedEOF } - return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) + return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode) } if ctx.litRemain < 0 { diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index 5b06174b..f5591fa1 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -1814,7 +1814,7 @@ TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 MOVQ 40(SP), AX ADDQ AX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R10, 32(SP) // outBase += outPosition @@ -2376,7 +2376,7 @@ TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 MOVQ 40(SP), CX ADDQ CX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R9, 32(SP) // outBase += outPosition @@ -2896,7 +2896,7 @@ TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 MOVQ 40(SP), AX ADDQ AX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R10, 32(SP) // outBase += outPosition @@ -3560,7 +3560,7 @@ TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 MOVQ 40(SP), CX ADDQ CX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R9, 32(SP) // outBase += outPosition diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 4be7cc73..066bef2a 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ 
b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -88,6 +88,10 @@ var ( // Close has been called. ErrDecoderClosed = errors.New("decoder used after Close") + // ErrEncoderClosed will be returned if the Encoder was used after + // Close has been called. + ErrEncoderClosed = errors.New("encoder used after Close") + // ErrDecoderNilInput is returned when a nil Reader was provided // and an operation other than Reset/DecodeAll/Close was attempted. ErrDecoderNilInput = errors.New("nil input provided as reader") diff --git a/vendor/github.com/mholt/archiver/v4/7z.go b/vendor/github.com/mholt/archiver/v4/7z.go index 44f8a00b..06e4dd17 100644 --- a/vendor/github.com/mholt/archiver/v4/7z.go +++ b/vendor/github.com/mholt/archiver/v4/7z.go @@ -31,13 +31,13 @@ type SevenZip struct { Password string } -func (z SevenZip) Name() string { return ".7z" } +func (z SevenZip) Extension() string { return ".7z" } -func (z SevenZip) Match(filename string, stream io.Reader) (MatchResult, error) { +func (z SevenZip) Match(_ context.Context, filename string, stream io.Reader) (MatchResult, error) { var mr MatchResult // match filename - if strings.Contains(strings.ToLower(filename), z.Name()) { + if strings.Contains(strings.ToLower(filename), z.Extension()) { mr.ByName = true } @@ -51,10 +51,7 @@ func (z SevenZip) Match(filename string, stream io.Reader) (MatchResult, error) return mr, nil } -// Archive is not implemented for 7z, but the method exists so that SevenZip satisfies the ArchiveFormat interface. -func (z SevenZip) Archive(_ context.Context, _ io.Writer, _ []File) error { - return fmt.Errorf("not implemented for 7z because there is no pure Go implementation found") -} +// Archive is not implemented for 7z because I do not know of a pure-Go 7z writer. // Extract extracts files from z, implementing the Extractor interface. Uniquely, however, // sourceArchive must be an io.ReaderAt and io.Seeker, which are oddly disjoint interfaces @@ -62,7 +59,7 @@ func (z SevenZip) Archive(_ context.Context, _ io.Writer, _ []File) error { // the interface because we figure you can Read() from anything you can ReadAt() or Seek() // with. Due to the nature of the zip archive format, if sourceArchive is not an io.Seeker // and io.ReaderAt, an error is returned. 
-func (z SevenZip) Extract(ctx context.Context, sourceArchive io.Reader, pathsInArchive []string, handleFile FileHandler) error { +func (z SevenZip) Extract(ctx context.Context, sourceArchive io.Reader, handleFile FileHandler) error { sra, ok := sourceArchive.(seekReaderAt) if !ok { return fmt.Errorf("input type must be an io.ReaderAt and io.Seeker because of zip format constraints") @@ -82,22 +79,27 @@ func (z SevenZip) Extract(ctx context.Context, sourceArchive io.Reader, pathsInA skipDirs := skipList{} for i, f := range zr.File { + f := f // make a copy for the Open closure if err := ctx.Err(); err != nil { return err // honor context cancellation } - if !fileIsIncluded(pathsInArchive, f.Name) { - continue - } if fileIsIncluded(skipDirs, f.Name) { continue } - file := File{ - FileInfo: f.FileInfo(), + fi := f.FileInfo() + file := FileInfo{ + FileInfo: fi, Header: f.FileHeader, NameInArchive: f.Name, - Open: func() (io.ReadCloser, error) { return f.Open() }, + Open: func() (fs.File, error) { + openedFile, err := f.Open() + if err != nil { + return nil, err + } + return fileInArchive{openedFile, fi}, nil + }, } err := handleFile(ctx, file) @@ -122,3 +124,6 @@ func (z SevenZip) Extract(ctx context.Context, sourceArchive io.Reader, pathsInA // https://py7zr.readthedocs.io/en/latest/archive_format.html#signature var sevenZipHeader = []byte("7z\xBC\xAF\x27\x1C") + +// Interface guard +var _ Extractor = SevenZip{} diff --git a/vendor/github.com/mholt/archiver/v4/README.md b/vendor/github.com/mholt/archiver/v4/README.md index 90937eb6..9b1cdc78 100644 --- a/vendor/github.com/mholt/archiver/v4/README.md +++ b/vendor/github.com/mholt/archiver/v4/README.md @@ -1,6 +1,6 @@ # archiver [![Go Reference](https://pkg.go.dev/badge/github.com/mholt/archiver/v4.svg)](https://pkg.go.dev/github.com/mholt/archiver/v4) [![Ubuntu-latest](https://github.com/mholt/archiver/actions/workflows/ubuntu-latest.yml/badge.svg)](https://github.com/mholt/archiver/actions/workflows/ubuntu-latest.yml) [![Macos-latest](https://github.com/mholt/archiver/actions/workflows/macos-latest.yml/badge.svg)](https://github.com/mholt/archiver/actions/workflows/macos-latest.yml) [![Windows-latest](https://github.com/mholt/archiver/actions/workflows/windows-latest.yml/badge.svg)](https://github.com/mholt/archiver/actions/workflows/windows-latest.yml) -Introducing **Archiver 4.0** - a cross-platform, multi-format archive utility and Go library. A powerful and flexible library meets an elegant CLI in this generic replacement for several platform-specific or format-specific archive utilities. +Introducing **Archiver 4.0 (alpha)** - a cross-platform, multi-format archive utility and Go library. A powerful and flexible library meets an elegant CLI in this generic replacement for several platform-specific or format-specific archive utilities. **:warning: v4 is in ALPHA. The core library APIs work pretty well but the command has not been implemented yet, nor have most automated tests. 
If you need the `arc` command, stick with v3 for now.** @@ -11,14 +11,14 @@ Introducing **Archiver 4.0** - a cross-platform, multi-format archive utility an - By file name - By header - Traverse directories, archive files, and any other file uniformly as [`io/fs`](https://pkg.go.dev/io/fs) file systems: - - [`DirFS`](https://pkg.go.dev/github.com/mholt/archiver/v4#DirFS) - [`FileFS`](https://pkg.go.dev/github.com/mholt/archiver/v4#FileFS) + - [`DirFS`](https://pkg.go.dev/github.com/mholt/archiver/v4#DirFS) - [`ArchiveFS`](https://pkg.go.dev/github.com/mholt/archiver/v4#ArchiveFS) - Compress and decompress files - Create and extract archive files - Walk or traverse into archive files - Extract only specific files from archives -- Insert (append) into .tar files +- Insert (append) into .tar and .zip archives - Read from password-protected 7-Zip files - Numerous archive and compression formats supported - Extensible (add more formats just by registering them) @@ -36,6 +36,7 @@ Introducing **Archiver 4.0** - a cross-platform, multi-format archive utility an - flate (.zip) - gzip (.gz) - lz4 (.lz4) +- lzip (.lz) - snappy (.sz) - xz (.xz) - zlib (.zz) @@ -116,7 +117,7 @@ If you want all the files, pass in a nil list of file paths. ```go // the type that will be used to read the input stream -format := archiver.Zip{} +var format archiver.Zip // the list of files we want out of the archive; any // directories will include all their contents unless @@ -140,7 +141,7 @@ if err != nil { Have an input stream with unknown contents? No problem, archiver can identify it for you. It will try matching based on filename and/or the header (which peeks at the stream): ```go -format, input, err := archiver.Identify("filename.tar.zst", input) +format, input, err := archiver.Identify(ctx, "filename.tar.zst", input) if err != nil { return err } @@ -164,7 +165,7 @@ if decom, ok := format.(archiver.Decompressor); ok { } ``` -`Identify()` works by reading an arbitrary number of bytes from the beginning of the stream (just enough to check for file headers). It buffers them and returns a new reader that lets you re-read them anew. +`Identify()` works by reading an arbitrary number of bytes from the beginning of the stream (just enough to check for file headers). It buffers them and returns a new reader that lets you re-read them anew. If your input stream is `io.Seeker` however, no buffer is created (it uses `Seek()` instead). ### Virtual file systems @@ -211,7 +212,7 @@ if dir, ok := f.(fs.ReadDirFile); ok { return err } for _, e := range entries { - fmt.Println(e.Name()) + fmt.Println(e.Extension()) } } ``` @@ -224,7 +225,7 @@ if err != nil { return err } for _, e := range entries { - fmt.Println(e.Name()) + fmt.Println(e.Extension()) } ``` @@ -246,6 +247,8 @@ if err != nil { } ``` +**Important .tar note:** Tar files do not efficiently implement file system semantics due to their roots in sequential-access design for tapes. File systems inherently assume random access, but tar files need to be read from the beginning to access something at the end. This is especially slow when the archive is compressed. Optimizations have been implemented to amortize `ReadDir()` calls so that `fs.WalkDir()` only has to scan the archive once, but they use more memory. Open calls require another scan to find the file. It may be more efficient to use `Tar.Extract()` directly if file system semantics are not important to you. 
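For example, a single amortized walk over an archive's contents might look like this (a minimal sketch; the archive name is hypothetical, and `FileSystem` here uses the filename-plus-stream signature introduced later in this patch):

```go
// Walk every entry of an archive exactly once; the first ReadDir()
// near the start of the walk indexes the archive, and later calls
// are served from that index.
fsys, err := archiver.FileSystem(ctx, "example.tar.gz", nil)
if err != nil {
	return err
}
err = fs.WalkDir(fsys, ".", func(name string, d fs.DirEntry, err error) error {
	if err != nil {
		return err
	}
	fmt.Println(name, d.IsDir())
	return nil
})
```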
+ #### Use with `http.FileServer` It can be used with http.FileServer to browse archives and directories in a browser. However, due to how http.FileServer works, don't directly use http.FileServer with compressed files; instead wrap it like following: @@ -301,9 +304,9 @@ defer decompressor.Close() // reads from decompressor will be decompressed ``` -### Append to tarball +### Append to tarball and zip archives -Tar archives can be appended to without creating a whole new archive by calling `Insert()` on a tar stream. However, this requires that the tarball is not compressed (due to complexities with modifying compression dictionaries). +Tar and Zip archives can be appended to without creating a whole new archive by calling `Insert()` on a tar or zip stream. However, for tarballs, this requires that the tarball is not compressed (due to complexities with modifying compression dictionaries). Here is an example that appends a file to a tarball on disk: @@ -325,3 +328,5 @@ if err != nil { } ``` +The code is similar for inserting into a Zip archive, except you'll call `Insert()` on the `Zip` type instead. + diff --git a/vendor/github.com/mholt/archiver/v4/archiver.go b/vendor/github.com/mholt/archiver/v4/archiver.go index 73ec00d4..f89410e2 100644 --- a/vendor/github.com/mholt/archiver/v4/archiver.go +++ b/vendor/github.com/mholt/archiver/v4/archiver.go @@ -12,14 +12,14 @@ import ( "time" ) -// File is a virtualized, generalized file abstraction for interacting with archives. -type File struct { +// FileInfo is a virtualized, generalized file abstraction for interacting with archives. +type FileInfo struct { fs.FileInfo // The file header as used/provided by the archive format. // Typically, you do not need to set this field when creating // an archive. - Header interface{} + Header any // The path of the file as it appears in the archive. // This is equivalent to Header.Name (for most Header @@ -27,6 +27,15 @@ type File struct { // it is such a common field and we want to preserve // format-agnosticism (no type assertions) for basic // operations. + // + // When extracting, this name or path may not have + // been sanitized; it should not be trusted at face + // value. Consider using path.Clean() before using. + // + // EXPERIMENTAL: If inserting a file into an archive, + // and this is left blank, the implementation of the + // archive format can default to using the file's base + // name. NameInArchive string // For symbolic and hard links, the target of the link. @@ -35,12 +44,11 @@ type File struct { // A callback function that opens the file to read its // contents. The file must be closed when reading is - // complete. Nil for files that don't have content - // (such as directories and links). - Open func() (io.ReadCloser, error) + // complete. + Open func() (fs.File, error) } -func (f File) Stat() (fs.FileInfo, error) { return f.FileInfo, nil } +func (f FileInfo) Stat() (fs.FileInfo, error) { return f.FileInfo, nil } // FilesFromDisk returns a list of files by walking the directories in the // given filenames map. The keys are the names on disk, and the values are @@ -63,8 +71,8 @@ func (f File) Stat() (fs.FileInfo, error) { return f.FileInfo, nil } // // This function is used primarily when preparing a list of files to add to // an archive. 
-func FilesFromDisk(options *FromDiskOptions, filenames map[string]string) ([]File, error) {
-	var files []File
+func FilesFromDisk(options *FromDiskOptions, filenames map[string]string) ([]FileInfo, error) {
+	var files []FileInfo
 	for rootOnDisk, rootInArchive := range filenames {
 		walkErr := filepath.WalkDir(rootOnDisk, func(filename string, d fs.DirEntry, err error) error {
 			if err != nil {
@@ -109,11 +117,11 @@ func FilesFromDisk(options *FromDiskOptions, filenames map[string]string) ([]Fil
 				info = noAttrFileInfo{info}
 			}
-			file := File{
+			file := FileInfo{
 				FileInfo:      info,
 				NameInArchive: nameInArchive,
 				LinkTarget:    linkTarget,
-				Open: func() (io.ReadCloser, error) {
+				Open: func() (fs.File, error) {
 					return os.Open(filename)
 				},
 			}
@@ -186,7 +194,7 @@ func (no noAttrFileInfo) Mode() fs.FileMode {
 	return no.FileInfo.Mode() & (fs.ModeType | fs.ModePerm)
 }
 func (noAttrFileInfo) ModTime() time.Time { return time.Time{} }
-func (noAttrFileInfo) Sys() interface{}   { return nil }
+func (noAttrFileInfo) Sys() any           { return nil }
 
 // FromDiskOptions specifies various options for gathering files from disk.
 type FromDiskOptions struct {
@@ -210,19 +218,25 @@ type FromDiskOptions struct {
 // archive contents are not necessarily ordered, skipping directories requires
 // memory, and skipping lots of directories may run up your memory bill.
 //
-// Any other returned error will terminate a walk.
-type FileHandler func(ctx context.Context, f File) error
+// Any other returned error will terminate a walk and be returned to the caller.
+type FileHandler func(ctx context.Context, info FileInfo) error
 
 // openAndCopyFile opens file for reading, copies its
 // contents to w, then closes file.
-func openAndCopyFile(file File, w io.Writer) error {
+func openAndCopyFile(file FileInfo, w io.Writer) error {
 	fileReader, err := file.Open()
 	if err != nil {
 		return err
 	}
 	defer fileReader.Close()
-	_, err = io.Copy(w, fileReader)
-	return err
+	// When file is in use and size is being written to, creating the compressed
+	// file will fail with "archive/tar: write too long." Using CopyN gracefully
+	// handles this.
+	_, err = io.CopyN(w, fileReader, file.Size())
+	if err != nil && err != io.EOF {
+		return err
+	}
+	return nil
 }
 
 // fileIsIncluded returns true if filename is included according to
@@ -250,6 +264,26 @@ func isSymlink(info fs.FileInfo) bool {
 	return info.Mode()&os.ModeSymlink != 0
 }
 
+// streamSizeBySeeking determines the size of the stream by
+// seeking to the end, then back again, so the resulting
+// seek position upon returning is the same as when called
+// (assuming no errors).
+func streamSizeBySeeking(s io.Seeker) (int64, error) {
+	currentPosition, err := s.Seek(0, io.SeekCurrent)
+	if err != nil {
+		return 0, fmt.Errorf("getting current offset: %w", err)
+	}
+	maxPosition, err := s.Seek(0, io.SeekEnd)
+	if err != nil {
+		return 0, fmt.Errorf("fast-forwarding to end: %w", err)
+	}
+	_, err = s.Seek(currentPosition, io.SeekStart)
+	if err != nil {
+		return 0, fmt.Errorf("returning to prior offset %d: %w", currentPosition, err)
+	}
+	return maxPosition, nil
+}
+
 // skipList keeps a list of non-intersecting paths
 // as long as its add method is used.
Identical // elements are rejected, more specific paths are diff --git a/vendor/github.com/mholt/archiver/v4/brotli.go b/vendor/github.com/mholt/archiver/v4/brotli.go index 5d17fae7..c650f40e 100644 --- a/vendor/github.com/mholt/archiver/v4/brotli.go +++ b/vendor/github.com/mholt/archiver/v4/brotli.go @@ -1,6 +1,7 @@ package archiver import ( + "context" "io" "strings" @@ -16,19 +17,25 @@ type Brotli struct { Quality int } -func (Brotli) Name() string { return ".br" } +func (Brotli) Extension() string { return ".br" } -func (br Brotli) Match(filename string, stream io.Reader) (MatchResult, error) { +func (br Brotli) Match(_ context.Context, filename string, stream io.Reader) (MatchResult, error) { var mr MatchResult // match filename - if strings.Contains(strings.ToLower(filename), br.Name()) { + if strings.Contains(strings.ToLower(filename), br.Extension()) { mr.ByName = true } - // brotli does not have well-defined file headers; the - // best way to match the stream would be to try decoding - // part of it, and this is not implemented for now + // brotli does not have well-defined file headers or a magic number; + // the best way to match the stream is probably to try decoding part + // of it, but we'll just have to guess a large-enough size that is + // still small enough for the smallest streams we'll encounter + r := brotli.NewReader(stream) + buf := make([]byte, 16) + if _, err := io.ReadFull(r, buf); err == nil { + mr.ByStream = true + } return mr, nil } diff --git a/vendor/github.com/mholt/archiver/v4/bz2.go b/vendor/github.com/mholt/archiver/v4/bz2.go index 57a278f4..a2a5f05e 100644 --- a/vendor/github.com/mholt/archiver/v4/bz2.go +++ b/vendor/github.com/mholt/archiver/v4/bz2.go @@ -2,6 +2,7 @@ package archiver import ( "bytes" + "context" "io" "strings" @@ -17,13 +18,13 @@ type Bz2 struct { CompressionLevel int } -func (Bz2) Name() string { return ".bz2" } +func (Bz2) Extension() string { return ".bz2" } -func (bz Bz2) Match(filename string, stream io.Reader) (MatchResult, error) { +func (bz Bz2) Match(_ context.Context, filename string, stream io.Reader) (MatchResult, error) { var mr MatchResult // match filename - if strings.Contains(strings.ToLower(filename), bz.Name()) { + if strings.Contains(strings.ToLower(filename), bz.Extension()) { mr.ByName = true } diff --git a/vendor/github.com/mholt/archiver/v4/formats.go b/vendor/github.com/mholt/archiver/v4/formats.go index db22811b..837114d9 100644 --- a/vendor/github.com/mholt/archiver/v4/formats.go +++ b/vendor/github.com/mholt/archiver/v4/formats.go @@ -12,7 +12,7 @@ import ( // RegisterFormat registers a format. It should be called during init. // Duplicate formats by name are not allowed and will panic. func RegisterFormat(format Format) { - name := strings.Trim(strings.ToLower(format.Name()), ".") + name := strings.Trim(strings.ToLower(format.Extension()), ".") if _, ok := formats[name]; ok { panic("format " + name + " is already registered") } @@ -32,23 +32,31 @@ func RegisterFormat(format Format) { // // If stream is non-nil then the returned io.Reader will always be // non-nil and will read from the same point as the reader which was -// passed in; it should be used in place of the input stream after +// passed in. If the input stream is not an io.Seeker, the returned +// io.Reader value should be used in place of the input stream after // calling Identify() because it preserves and re-reads the bytes that // were already read during the identification process. 
-func Identify(filename string, stream io.Reader) (Format, io.Reader, error) { +// +// If the input stream is an io.Seeker, Seek() must work, and the +// original input value will be returned instead of a wrapper value. +func Identify(ctx context.Context, filename string, stream io.Reader) (Format, io.Reader, error) { var compression Compression var archival Archival + var extraction Extraction - rewindableStream := newRewindReader(stream) + rewindableStream, err := newRewindReader(stream) + if err != nil { + return nil, nil, err + } - // try compression format first, since that's the outer "layer" + // try compression format first, since that's the outer "layer" if combined for name, format := range formats { cf, isCompression := format.(Compression) if !isCompression { continue } - matchResult, err := identifyOne(format, filename, rewindableStream, nil) + matchResult, err := identifyOne(ctx, format, filename, rewindableStream, nil) if err != nil { return nil, rewindableStream.reader(), fmt.Errorf("matching %s: %w", name, err) } @@ -61,41 +69,49 @@ func Identify(filename string, stream io.Reader) (Format, io.Reader, error) { } } - // try archive format next + // try archival and extraction format next for name, format := range formats { - af, isArchive := format.(Archival) - if !isArchive { + ar, isArchive := format.(Archival) + ex, isExtract := format.(Extraction) + if !isArchive && !isExtract { continue } - matchResult, err := identifyOne(format, filename, rewindableStream, compression) + matchResult, err := identifyOne(ctx, format, filename, rewindableStream, compression) if err != nil { return nil, rewindableStream.reader(), fmt.Errorf("matching %s: %w", name, err) } if matchResult.Matched() { - archival = af + archival = ar + extraction = ex break } } - // the stream should be rewound by identifyOne + // the stream should be rewound by identifyOne; then return the most specific type of match bufferedStream := rewindableStream.reader() switch { - case compression != nil && archival == nil: + case compression != nil && archival == nil && extraction == nil: return compression, bufferedStream, nil - case compression == nil && archival != nil: + case compression == nil && archival != nil && extraction == nil: return archival, bufferedStream, nil - case compression != nil && archival != nil: - return CompressedArchive{compression, archival}, bufferedStream, nil + case compression == nil && archival == nil && extraction != nil: + return extraction, bufferedStream, nil + case archival != nil || extraction != nil: + return Archive{compression, archival, extraction}, bufferedStream, nil default: - return nil, bufferedStream, ErrNoMatch + return nil, bufferedStream, NoMatch } } -func identifyOne(format Format, filename string, stream *rewindReader, comp Compression) (mr MatchResult, err error) { +func identifyOne(ctx context.Context, format Format, filename string, stream *rewindReader, comp Compression) (mr MatchResult, err error) { defer stream.rewind() + if filename == "." 
{
+		filename = ""
+	}
+
 	// if looking within a compressed format, wrap the stream in a
 	// reader that can decompress it so we can match the "inner" format
 	// (yes, we have to make a new reader every time we do a match,
@@ -107,14 +123,14 @@ func identifyOne(format Format, filename string, stream *rewindReader, comp Comp
 			return MatchResult{}, openErr
 		}
 		defer decompressedStream.Close()
-		mr, err = format.Match(filename, decompressedStream)
+		mr, err = format.Match(ctx, filename, decompressedStream)
 	} else {
 		// Make sure we pass a nil io.Reader not a *rewindReader(nil)
 		var r io.Reader
 		if stream != nil {
 			r = stream
 		}
-		mr, err = format.Match(filename, r)
+		mr, err = format.Match(ctx, filename, r)
 	}
 
 	// if the error is EOF, we can just ignore it.
@@ -150,44 +166,44 @@ func readAtMost(stream io.Reader, n int) ([]byte, error) {
 	return nil, err
 }
 
-// CompressedArchive combines a compression format on top of an archive
-// format (e.g. "tar.gz") and provides both functionalities in a single
-// type. It ensures that archive functions are wrapped by compressors and
+// Archive represents an archive which may be compressed at the outer layer.
+// It combines a compression format on top of an archive/extraction
+// format (e.g. ".tar.gz") and provides both functionalities in a single
+// type. It ensures that archival functions are wrapped by compressors and
 // decompressors. However, compressed archives have some limitations; for
 // example, files cannot be inserted/appended because of complexities with
 // modifying existing compression state (perhaps this could be overcome,
 // but I'm not about to try it).
 //
-// As this type is intended to compose compression and archive formats,
-// both must be specified in order for this value to be valid, or its
-// methods will return errors.
-type CompressedArchive struct {
+// The embedded Archival and Extraction values are used for writing and
+// reading, respectively. Compression is optional and is only needed if the
+// format is compressed externally (for example, tar archives).
+type Archive struct {
 	Compression
 	Archival
+	Extraction
 }
 
-// Name returns a concatenation of the archive format name
-// and the compression format name.
-func (caf CompressedArchive) Name() string {
-	if caf.Compression == nil && caf.Archival == nil {
-		panic("missing both compression and archive formats")
-	}
+// Extension returns a concatenation of the archive and compression format extensions.
+func (ar Archive) Extension() string {
 	var name string
-	if caf.Archival != nil {
-		name += caf.Archival.Name()
+	if ar.Archival != nil {
+		name += ar.Archival.Extension()
+	} else if ar.Extraction != nil {
+		name += ar.Extraction.Extension()
 	}
-	if caf.Compression != nil {
-		name += caf.Compression.Name()
+	if ar.Compression != nil {
+		name += ar.Compression.Extension()
 	}
 	return name
 }
 
-// Match matches if the input matches both the compression and archive format.
-func (caf CompressedArchive) Match(filename string, stream io.Reader) (MatchResult, error) {
+// Match matches if the input matches both the compression and archival/extraction format.
+func (ar Archive) Match(ctx context.Context, filename string, stream io.Reader) (MatchResult, error) { var conglomerate MatchResult - if caf.Compression != nil { - matchResult, err := caf.Compression.Match(filename, stream) + if ar.Compression != nil { + matchResult, err := ar.Compression.Match(ctx, filename, stream) if err != nil { return MatchResult{}, err } @@ -197,7 +213,7 @@ func (caf CompressedArchive) Match(filename string, stream io.Reader) (MatchResu // wrap the reader with the decompressor so we can // attempt to match the archive by reading the stream - rc, err := caf.Compression.OpenReader(stream) + rc, err := ar.Compression.OpenReader(stream) if err != nil { return matchResult, err } @@ -207,8 +223,8 @@ func (caf CompressedArchive) Match(filename string, stream io.Reader) (MatchResu conglomerate = matchResult } - if caf.Archival != nil { - matchResult, err := caf.Archival.Match(filename, stream) + if ar.Archival != nil { + matchResult, err := ar.Archival.Match(ctx, filename, stream) if err != nil { return MatchResult{}, err } @@ -223,26 +239,32 @@ func (caf CompressedArchive) Match(filename string, stream io.Reader) (MatchResu } // Archive adds files to the output archive while compressing the result. -func (caf CompressedArchive) Archive(ctx context.Context, output io.Writer, files []File) error { - if caf.Compression != nil { - wc, err := caf.Compression.OpenWriter(output) +func (ar Archive) Archive(ctx context.Context, output io.Writer, files []FileInfo) error { + if ar.Archival == nil { + return fmt.Errorf("no archival format") + } + if ar.Compression != nil { + wc, err := ar.Compression.OpenWriter(output) if err != nil { return err } defer wc.Close() output = wc } - return caf.Archival.Archive(ctx, output, files) + return ar.Archival.Archive(ctx, output, files) } // ArchiveAsync adds files to the output archive while compressing the result asynchronously. -func (caf CompressedArchive) ArchiveAsync(ctx context.Context, output io.Writer, jobs <-chan ArchiveAsyncJob) error { - do, ok := caf.Archival.(ArchiverAsync) +func (ar Archive) ArchiveAsync(ctx context.Context, output io.Writer, jobs <-chan ArchiveAsyncJob) error { + if ar.Archival == nil { + return fmt.Errorf("no archival format") + } + do, ok := ar.Archival.(ArchiverAsync) if !ok { - return fmt.Errorf("%s archive does not support async writing", caf.Name()) + return fmt.Errorf("%T archive does not support async writing", ar.Archival) } - if caf.Compression != nil { - wc, err := caf.Compression.OpenWriter(output) + if ar.Compression != nil { + wc, err := ar.Compression.OpenWriter(output) if err != nil { return err } @@ -253,16 +275,19 @@ func (caf CompressedArchive) ArchiveAsync(ctx context.Context, output io.Writer, } // Extract reads files out of an archive while decompressing the results. 
-func (caf CompressedArchive) Extract(ctx context.Context, sourceArchive io.Reader, pathsInArchive []string, handleFile FileHandler) error { - if caf.Compression != nil { - rc, err := caf.Compression.OpenReader(sourceArchive) +func (ar Archive) Extract(ctx context.Context, sourceArchive io.Reader, handleFile FileHandler) error { + if ar.Extraction == nil { + return fmt.Errorf("no extraction format") + } + if ar.Compression != nil { + rc, err := ar.Compression.OpenReader(sourceArchive) if err != nil { return err } defer rc.Close() sourceArchive = rc } - return caf.Archival.(Extractor).Extract(ctx, sourceArchive, pathsInArchive, handleFile) + return ar.Extraction.Extract(ctx, sourceArchive, handleFile) } // MatchResult returns true if the format was matched either @@ -285,26 +310,42 @@ func (mr MatchResult) Matched() bool { return mr.ByName || mr.ByStream } // read from the stream. This is useful for "peeking" a stream an // arbitrary number of bytes. Loosely based on the Connection type // from https://github.com/mholt/caddy-l4. +// +// If the reader is also an io.Seeker, no buffer is used, and instead +// the stream seeks back to the starting position. type rewindReader struct { io.Reader + start int64 buf *bytes.Buffer bufReader io.Reader } -func newRewindReader(r io.Reader) *rewindReader { +func newRewindReader(r io.Reader) (*rewindReader, error) { if r == nil { - return nil + return nil, nil } - return &rewindReader{ - Reader: r, - buf: new(bytes.Buffer), + + rr := &rewindReader{Reader: r} + + // avoid buffering if we have a seeker we can use + if seeker, ok := r.(io.Seeker); ok { + var err error + rr.start, err = seeker.Seek(0, io.SeekCurrent) + if err != nil { + return nil, fmt.Errorf("seek to determine current position: %w", err) + } + } else { + rr.buf = new(bytes.Buffer) } + + return rr, nil } func (rr *rewindReader) Read(p []byte) (n int, err error) { if rr == nil { - panic("internal error: reading from nil rewindReader") + panic("reading from nil rewindReader") } + // if there is a buffer we should read from, start // with that; we only read from the underlying stream // after the buffer has been "depleted" @@ -319,13 +360,13 @@ func (rr *rewindReader) Read(p []byte) (n int, err error) { } } - // buffer has been "depleted" so read from - // underlying connection + // buffer has been depleted or we are not using one, + // so read from underlying stream nr, err := rr.Reader.Read(p[n:]) // anything that was read needs to be written to - // the buffer, even if there was an error - if nr > 0 { + // the buffer (if used), even if there was an error + if nr > 0 && rr.buf != nil { if nw, errw := rr.buf.Write(p[n : n+nr]); errw != nil { return nw, errw } @@ -341,18 +382,24 @@ func (rr *rewindReader) Read(p []byte) (n int, err error) { // rewind resets the stream to the beginning by causing // Read() to start reading from the beginning of the -// buffered bytes. +// stream, or, if buffering, the buffered bytes. func (rr *rewindReader) rewind() { if rr == nil { return } + if ras, ok := rr.Reader.(io.Seeker); ok { + if _, err := ras.Seek(rr.start, io.SeekStart); err == nil { + return + } + } rr.bufReader = bytes.NewReader(rr.buf.Bytes()) } // reader returns a reader that reads first from the buffered -// bytes, then from the underlying stream. After calling this, -// no more rewinding is allowed since reads from the stream are -// not recorded, so rewinding properly is impossible. 
+// bytes (if buffering), then from the underlying stream; if a +// Seeker, the stream will be seeked back to the start. After +// calling this, no more rewinding is allowed since reads from +// the stream are not recorded, so rewinding properly is impossible. // If the underlying reader implements io.Seeker, then the // underlying reader will be used directly. func (rr *rewindReader) reader() io.Reader { @@ -360,23 +407,23 @@ func (rr *rewindReader) reader() io.Reader { return nil } if ras, ok := rr.Reader.(io.Seeker); ok { - if _, err := ras.Seek(0, io.SeekStart); err == nil { + if _, err := ras.Seek(rr.start, io.SeekStart); err == nil { return rr.Reader } } return io.MultiReader(bytes.NewReader(rr.buf.Bytes()), rr.Reader) } -// ErrNoMatch is returned if there are no matching formats. -var ErrNoMatch = fmt.Errorf("no formats matched") +// NoMatch is a special error returned if there are no matching formats. +var NoMatch = fmt.Errorf("no formats matched") // Registered formats. var formats = make(map[string]Format) // Interface guards var ( - _ Format = (*CompressedArchive)(nil) - _ Archiver = (*CompressedArchive)(nil) - _ ArchiverAsync = (*CompressedArchive)(nil) - _ Extractor = (*CompressedArchive)(nil) + _ Format = (*Archive)(nil) + _ Archiver = (*Archive)(nil) + _ ArchiverAsync = (*Archive)(nil) + _ Extractor = (*Archive)(nil) ) diff --git a/vendor/github.com/mholt/archiver/v4/fs.go b/vendor/github.com/mholt/archiver/v4/fs.go index a0ed59bc..56042cf6 100644 --- a/vendor/github.com/mholt/archiver/v4/fs.go +++ b/vendor/github.com/mholt/archiver/v4/fs.go @@ -9,132 +9,117 @@ import ( "os" "path" "path/filepath" - "runtime" - "sort" + "slices" "strings" "time" - - "github.com/klauspost/compress/zip" ) -// FileSystem opens the file at root as a read-only file system. The root may be a -// path to a directory, archive file, compressed archive file, compressed file, or -// any other file on disk. +// FileSystem identifies the format of the input and returns a read-only file system. +// The input can be a filename, stream, or both. // -// If root is a directory, its contents are accessed directly from the disk's file system. -// If root is an archive file, its contents can be accessed like a normal directory; -// compressed archive files are transparently decompressed as contents are accessed. -// And if root is any other file, it is the only file in the file system; if the file -// is compressed, it is transparently decompressed when read from. +// If only a filename is specified, it may be a path to a directory, archive file, +// compressed archive file, compressed regular file, or any other regular file on +// disk. If the filename is a directory, its contents are accessed directly from +// the device's file system. If the filename is an archive file, the contents can +// be accessed like a normal directory; compressed archive files are transparently +// decompressed as contents are accessed. And if the filename is any other file, it +// is the only file in the returned file system; if the file is compressed, it is +// transparently decompressed when read from. // -// This method essentially offers uniform read access to various kinds of files: -// directories, archives, compressed archives, and individual files are all treated -// the same way. +// If a stream is specified, the filename (if available) is used as a hint to help +// identify its format. 
Streams of archive files must be able to be made into an +// io.SectionReader (for safe concurrency) which requires io.ReaderAt and io.Seeker +// (to efficiently determine size). The automatic format identification requires +// io.Reader and will use io.Seeker if supported to avoid buffering. // -// Except for zip files, the returned FS values are guaranteed to be fs.ReadDirFS and -// fs.StatFS types, and may also be fs.SubFS. -func FileSystem(ctx context.Context, root string) (fs.FS, error) { - info, err := os.Stat(root) - if err != nil { - return nil, err - } +// Whether the data comes from disk or a stream, it is peeked at to automatically +// detect which format to use. +// +// This function essentially offers uniform read access to various kinds of files: +// directories, archives, compressed archives, individual files, and file streams +// are all treated the same way. +// +// NOTE: The performance of compressed tar archives is not great due to overhead +// with decompression. However, the fs.WalkDir() use case has been optimized to +// create an index on first call to ReadDir(). +func FileSystem(ctx context.Context, filename string, stream ReaderAtSeeker) (fs.FS, error) { + if filename == "" && stream == nil { + return nil, errors.New("no input") + } + + // if an input stream is specified, we'll use that for identification + // and for ArchiveFS (if it's an archive); but if not, we'll open the + // file and read it for identification, but in that case we won't want + // to also use it for the ArchiveFS (because we need to close what we + // opened, and ArchiveFS opens its own files), hence this separate var + idStream := stream + + // if input is only a filename (no stream), check if it's a directory; + // if not, open it so we can determine which format to use (filename + // is not always a good indicator of file format) + if filename != "" && stream == nil { + info, err := os.Stat(filename) + if err != nil { + return nil, err + } + + // real folders can be accessed easily + if info.IsDir() { + return os.DirFS(filename), nil + } - // real folders can be accessed easily - if info.IsDir() { - return DirFS(root), nil + // if any archive formats recognize this file, access it like a folder + file, err := os.Open(filename) + if err != nil { + return nil, err + } + defer file.Close() + idStream = file // use file for format identification only } - // if any archive formats recognize this file, access it like a folder - file, err := os.Open(root) + // normally, callers should use the Reader value returned from Identify, but + // our input is a Seeker, so we know the original input value gets returned + format, _, err := Identify(ctx, filepath.Base(filename), idStream) + if errors.Is(err, NoMatch) { + return FileFS{Path: filename}, nil // must be an ordinary file + } if err != nil { - return nil, err + return nil, fmt.Errorf("identify format: %w", err) } - defer file.Close() - format, _, err := Identify(filepath.Base(root), file) - if err != nil && !errors.Is(err, ErrNoMatch) { - return nil, err - } + switch fileFormat := format.(type) { + case Extractor: + // if no stream was input, return an ArchiveFS that relies on the filepath + if stream == nil { + return &ArchiveFS{Path: filename, Format: fileFormat, Context: ctx}, nil + } - if format != nil { - switch ff := format.(type) { - case Zip: - // zip.Reader is more performant than ArchiveFS, because zip.Reader caches content information - // and zip.Reader can open several content files concurrently because of io.ReaderAt requirement - // 
while ArchiveFS can't. - // zip.Reader doesn't suffer from issue #330 and #310 according to local test (but they should be fixed anyway) + // otherwise, if a stream was input, return an ArchiveFS that relies on that - // open the file anew, as our original handle will be closed when we return - file, err := os.Open(root) - if err != nil { - return nil, err - } - return zip.NewReader(file, info.Size()) - case Archival: - // TODO: we only really need Extractor and Decompressor here, not the combined interfaces... - return ArchiveFS{Path: root, Format: ff, Context: ctx}, nil - case Compression: - return FileFS{Path: root, Compression: ff}, nil + // determine size -- we know that the stream value we get back from + // Identify is the same type as what we input because it is a Seeker + size, err := streamSizeBySeeking(stream) + if err != nil { + return nil, fmt.Errorf("seeking for size: %w", err) } - } - // otherwise consider it an ordinary file; make a file system with it as its only file - return FileFS{Path: root}, nil -} + sr := io.NewSectionReader(stream, 0, size) -// DirFS allows accessing a directory on disk with a consistent file system interface. -// It is almost the same as os.DirFS, except for some reason os.DirFS only implements -// Open() and Stat(), but we also need ReadDir(). Seems like an obvious miss (as of Go 1.17) -// and I have questions: https://twitter.com/mholt6/status/1476058551432876032 -type DirFS string + return &ArchiveFS{Stream: sr, Format: fileFormat, Context: ctx}, nil -// Open opens the named file. -func (f DirFS) Open(name string) (fs.File, error) { - if err := f.checkName(name, "open"); err != nil { - return nil, err + case Compression: + return FileFS{Path: filename, Compression: fileFormat}, nil } - return os.Open(filepath.Join(string(f), name)) -} -// ReadDir returns a listing of all the files in the named directory. -func (f DirFS) ReadDir(name string) ([]fs.DirEntry, error) { - if err := f.checkName(name, "readdir"); err != nil { - return nil, err - } - return os.ReadDir(filepath.Join(string(f), name)) + return nil, fmt.Errorf("unable to create file system rooted at %s due to unsupported file or folder type", filename) } -// Stat returns info about the named file. -func (f DirFS) Stat(name string) (fs.FileInfo, error) { - if err := f.checkName(name, "stat"); err != nil { - return nil, err - } - return os.Stat(filepath.Join(string(f), name)) -} - -// Sub returns an FS corresponding to the subtree rooted at dir. -func (f DirFS) Sub(dir string) (fs.FS, error) { - if err := f.checkName(dir, "sub"); err != nil { - return nil, err - } - info, err := f.Stat(dir) - if err != nil { - return nil, err - } - if !info.IsDir() { - return nil, fmt.Errorf("%s is not a directory", dir) - } - return DirFS(filepath.Join(string(f), dir)), nil -} - -// checkName returns an error if name is not a valid path according to the docs of -// the io/fs package, with an extra cue taken from the standard lib's implementation -// of os.dirFS.Open(), which checks for invalid characters in Windows paths. -func (f DirFS) checkName(name, op string) error { - if !fs.ValidPath(name) || runtime.GOOS == "windows" && strings.ContainsAny(name, `\:`) { - return &fs.PathError{Op: op, Path: name, Err: fs.ErrInvalid} - } - return nil +// ReaderAtSeeker is a type that can read, read at, and seek. +// os.File and io.SectionReader both implement this interface. 
+type ReaderAtSeeker interface { + io.Reader + io.ReaderAt + io.Seeker } // FileFS allows accessing a file on disk using a consistent file system interface. @@ -169,7 +154,15 @@ func (f FileFS) Open(name string) (fs.File, error) { if err != nil { return nil, err } - return compressedFile{file, r}, nil + return compressedFile{r, closeBoth{file, r}}, nil +} + +// Stat stats the named file, which must be the file used to create the file system. +func (f FileFS) Stat(name string) (fs.FileInfo, error) { + if err := f.checkName(name, "stat"); err != nil { + return nil, err + } + return os.Stat(f.Path) } // ReadDir returns a directory listing with the file as the singular entry. @@ -184,19 +177,18 @@ func (f FileFS) ReadDir(name string) ([]fs.DirEntry, error) { return []fs.DirEntry{fs.FileInfoToDirEntry(info)}, nil } -// Stat stats the named file, which must be the file used to create the file system. -func (f FileFS) Stat(name string) (fs.FileInfo, error) { - if err := f.checkName(name, "stat"); err != nil { - return nil, err - } - return os.Stat(f.Path) -} - +// checkName ensures the name is a valid path and also, in the case of +// the FileFS, that it is either ".", the filename originally passed in +// to create the FileFS, or the base of the filename (name without path). +// Other names do not make sense for a FileFS since the FS is only 1 file. func (f FileFS) checkName(name, op string) error { + if name == f.Path { + return nil + } if !fs.ValidPath(name) { - return &fs.PathError{Op: "open", Path: name, Err: fs.ErrInvalid} + return &fs.PathError{Op: op, Path: name, Err: fs.ErrInvalid} } - if name != "." && name != path.Base(f.Path) { + if name != "." && name != filepath.Base(f.Path) { return &fs.PathError{Op: op, Path: name, Err: fs.ErrNotExist} } return nil @@ -206,50 +198,66 @@ func (f FileFS) checkName(name, op string) error { // from a decompression reader, and which closes both // that reader and the underlying file. type compressedFile struct { - *os.File - decomp io.ReadCloser + io.Reader // decompressor + closeBoth // file and decompressor } -func (cf compressedFile) Read(p []byte) (int, error) { return cf.decomp.Read(p) } -func (cf compressedFile) Close() error { - err := cf.File.Close() - err2 := cf.decomp.Close() - if err2 != nil && err == nil { - err = err2 - } - return err -} - -// ArchiveFS allows accessing an archive (or a compressed archive) using a +// ArchiveFS allows reading an archive (or a compressed archive) using a // consistent file system interface. Essentially, it allows traversal and // reading of archive contents the same way as any normal directory on disk. // The contents of compressed archives are transparently decompressed. // -// A valid ArchiveFS value must set either Path or Stream. If Path is set, -// a literal file will be opened from the disk. If Stream is set, new -// SectionReaders will be implicitly created to access the stream, enabling -// safe, concurrent access. +// A valid ArchiveFS value must set either Path or Stream, but not both. +// If Path is set, a literal file will be opened from the disk. +// If Stream is set, new SectionReaders will be implicitly created to +// access the stream, enabling safe, concurrent access. // // NOTE: Due to Go's file system APIs (see package io/fs), the performance -// of ArchiveFS when used with fs.WalkDir() is poor for archives with lots -// of files (see issue #326). 
The fs.WalkDir() API requires listing each
-// directory's contents in turn, and the only way to ensure we return the
-// complete list of folder contents is to traverse the whole archive and
-// build a slice; so if this is done for the root of an archive with many
-// files, performance tends toward O(n^2) as the entire archive is walked
-// for every folder that is enumerated (WalkDir calls ReadDir recursively).
-// If you do not need each directory's contents walked in order, please
-// prefer calling Extract() from an archive type directly; this will perform
-// a O(n) walk of the contents in archive order, rather than the slower
-// directory tree order.
+// of ArchiveFS can suffer when using fs.WalkDir(). To mitigate this,
+// an optimized fs.ReadDirFS has been implemented that indexes the entire
+// archive on the first call to ReadDir() (since the entire archive needs
+// to be walked for every call to ReadDir() anyway, as archive contents are
+// often unordered). The first call to ReadDir(), i.e. near the start of the
+// walk, will be slow for large archives, but should be instantaneous after.
+// If you don't care about walking a file system in directory order, consider
+// calling Extract() on the underlying archive format type directly, which
+// walks the archive in entry order, without needing to do any sorting.
+//
+// Note that fs.FS implementations, including this one, reject paths starting
+// with "./". This can be problematic sometimes, as it is not uncommon for
+// tarballs to contain a top-level/root directory literally named ".", which
+// can happen if a tarball is created in the same directory it is archiving.
+// The underlying Extract() calls are faithful to entries with this name,
+// but file systems have certain semantics around "." that restrict its use.
+// For example, a file named "." cannot be created on a real file system
+// because it is a special name that means "current directory".
+//
+// We had to decide whether to honor the true name in the archive, or honor
+// file system semantics. Given that this is a virtual file system and other
+// code using the fs.FS APIs will trip over a literal directory named ".",
+// we choose to honor file system semantics. Files named "." are ignored;
+// directories with this name are effectively transparent; their contents
+// get promoted up a directory/level. This means that a file at "./x", where
+// "." is a literal directory name, will have its name passed in as "x" in
+// WalkDir callbacks. If you need the raw, uninterpreted values from an
+// archive, use the formats' Extract() method directly. See
+// https://github.com/golang/go/issues/70155 for a little more background.
+//
+// This does have one negative edge case... a tar containing contents like
+// [x . ./x] will have a conflict on the file named "x" because "./x" will
+// also be accessed with the name of "x".
 type ArchiveFS struct {
 	// set one of these
 	Path   string            // path to the archive file on disk, or...
 	Stream *io.SectionReader // ...stream from which to read archive
-	Format  Archival        // the archive format
+	Format  Extractor       // the archive format
 	Prefix  string          // optional subdirectory in which to root the fs
-	Context context.Context // optional
+	Context context.Context // optional; mainly for cancellation
+
+	// amortizing cache speeds up walks (esp. ReadDir)
+	contents map[string]fs.FileInfo
+	dirs     map[string][]fs.DirEntry
 }
 
 // context always returns a context, preferring f.Context if not nil.
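As a rough illustration of how these fields fit together (a sketch only; the archive path and the Gz/Tar composition are assumptions for the example, not part of this patch):

```go
// Open one file out of a .tar.gz by constructing ArchiveFS directly.
// Archive composes a Compression format with an Extraction format,
// and satisfies the Extractor interface expected by the Format field.
fsys := &archiver.ArchiveFS{
	Path: "example.tar.gz", // hypothetical archive on disk
	Format: archiver.Archive{
		Compression: archiver.Gz{},
		Extraction:  archiver.Tar{},
	},
	Context: ctx, // mainly for cancellation
}
f, err := fsys.Open("sub/dir/file.txt")
if err != nil {
	return err
}
defer f.Close()
```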
@@ -264,12 +272,33 @@ func (f ArchiveFS) context() context.Context { // the archive file itself will be opened as a directory file. func (f ArchiveFS) Open(name string) (fs.File, error) { if !fs.ValidPath(name) { - return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrInvalid} + return nil, &fs.PathError{Op: "open", Path: name, Err: fmt.Errorf("%w: %s", fs.ErrInvalid, name)} } - var archiveFile fs.File + // apply prefix if fs is rooted in a subtree + name = path.Join(f.Prefix, name) + + // if we've already indexed the archive, we can know quickly if the file doesn't exist, + // and we can also return directory files with their entries instantly + if f.contents != nil { + if info, found := f.contents[name]; found { + if info.IsDir() { + if entries, ok := f.dirs[name]; ok { + return &dirFile{info: info, entries: entries}, nil + } + } + } else { + if entries, found := f.dirs[name]; found { + return &dirFile{info: implicitDirInfo{implicitDirEntry{name}}, entries: entries}, nil + } + return nil, &fs.PathError{Op: "open", Path: name, Err: fmt.Errorf("open %s: %w", name, fs.ErrNotExist)} + } + } + + // if a filename is specified, open the archive file + var archiveFile *os.File var err error - if f.Path != "" { + if f.Stream == nil { archiveFile, err = os.Open(f.Path) if err != nil { return nil, err @@ -282,206 +311,131 @@ func (f ArchiveFS) Open(name string) (fs.File, error) { archiveFile.Close() } }() - } else if f.Stream != nil { - archiveFile = fakeArchiveFile{} + } else if f.Stream == nil { + return nil, fmt.Errorf("no input; one of Path or Stream must be set") } - // apply prefix if fs is rooted in a subtree - name = path.Join(f.Prefix, name) - // handle special case of opening the archive root - if name == "." && archiveFile != nil { - archiveInfo, err := archiveFile.Stat() + if name == "." 
{ + var archiveInfo fs.FileInfo + if archiveFile != nil { + archiveInfo, err = archiveFile.Stat() + if err != nil { + return nil, err + } + } else { + archiveInfo = implicitDirInfo{ + implicitDirEntry{"."}, + } + } + var entries []fs.DirEntry + entries, err = f.ReadDir(name) if err != nil { return nil, err } - entries, err := f.ReadDir(name) - if err != nil { + if err := archiveFile.Close(); err != nil { return nil, err } return &dirFile{ - extractedFile: extractedFile{ - File: File{ - FileInfo: dirFileInfo{archiveInfo}, - NameInArchive: ".", - }, - }, + info: dirFileInfo{archiveInfo}, entries: entries, }, nil } - var ( - files []File - found bool - ) - // collect them all or stop at exact file match, note we don't stop at folder match - handler := func(_ context.Context, file File) error { - file.NameInArchive = strings.Trim(file.NameInArchive, "/") - files = append(files, file) - if file.NameInArchive == name && !file.IsDir() { - found = true - return errStopWalk - } - return nil - } - - var inputStream io.Reader = archiveFile - if f.Stream != nil { + var inputStream io.Reader + if f.Stream == nil { + inputStream = archiveFile + } else { inputStream = io.NewSectionReader(f.Stream, 0, f.Stream.Size()) } - err = f.Format.Extract(f.context(), inputStream, []string{name}, handler) - if found { - err = nil - } - if err != nil { - return nil, err - } - - if len(files) == 0 { - return nil, fs.ErrNotExist - } - - // exactly one or exact file found, test name match to detect implicit dir name https://github.com/mholt/archiver/issues/340 - if (len(files) == 1 && files[0].NameInArchive == name) || found { - file := files[len(files)-1] - if file.IsDir() { - return &dirFile{extractedFile: extractedFile{File: file}}, nil - } - - // if named file is not a regular file, it can't be opened - if !file.Mode().IsRegular() { - return extractedFile{File: file}, nil - } - - // regular files can be read, so open it for reading - rc, err := file.Open() + var decompressor io.ReadCloser + if decomp, ok := f.Format.(Decompressor); ok { + decompressor, err = decomp.OpenReader(inputStream) if err != nil { return nil, err } - return extractedFile{File: file, ReadCloser: rc, parentArchive: archiveFile}, nil + inputStream = decompressor } - // implicit files - files = fillImplicit(files) - file := search(name, files) - if file == nil { - return nil, fs.ErrNotExist - } + // prepare the handler that we'll need if we have to iterate the + // archive to find the file being requested + var fsFile fs.File + handler := func(ctx context.Context, file FileInfo) error { + if err := ctx.Err(); err != nil { + return err + } - if file.IsDir() { - return &dirFile{extractedFile: extractedFile{File: *file}, entries: openReadDir(name, files)}, nil - } + // paths in archives can't necessarily be trusted; also clean up any "./" prefix + file.NameInArchive = path.Clean(file.NameInArchive) - // very unlikely - // maybe just panic, because extractor already walk through all the entries, file is impossible to read - // unless it's from a zip file. 
+ if !strings.HasPrefix(file.NameInArchive, name) { + return nil + } - // if named file is not a regular file, it can't be opened - if !file.Mode().IsRegular() { - return extractedFile{File: *file}, nil - } + // if this is the requested file, and it's a directory, set up the dirFile, + // which will include a listing of all its contents as we continue the walk + if file.NameInArchive == name && file.IsDir() { + fsFile = &dirFile{info: file} // will fill entries slice as we continue the walk + return nil + } - // regular files can be read, so open it for reading - rc, err := file.Open() - if err != nil { - return nil, err - } - return extractedFile{File: *file, ReadCloser: rc, parentArchive: archiveFile}, nil -} + // if the named file was a directory and we are filling its entries, + // add this entry to the list + if df, ok := fsFile.(*dirFile); ok { + df.entries = append(df.entries, fs.FileInfoToDirEntry(file)) -// copy of the same function from zip -func split(name string) (dir, elem string, isDir bool) { - if name[len(name)-1] == '/' { - isDir = true - name = name[:len(name)-1] - } - i := len(name) - 1 - for i >= 0 && name[i] != '/' { - i-- - } - if i < 0 { - return ".", name, isDir - } - return name[:i], name[i+1:], isDir -} + // don't traverse into subfolders + if file.IsDir() { + return fs.SkipDir + } -// modified from zip.Reader initFileList, it's used to find all implicit dirs -func fillImplicit(files []File) []File { - dirs := make(map[string]bool) - knownDirs := make(map[string]bool) - entries := make([]File, 0, 0) - for _, file := range files { - for dir := path.Dir(file.NameInArchive); dir != "."; dir = path.Dir(dir) { - dirs[dir] = true + return nil } - entries = append(entries, file) - if file.IsDir() { - knownDirs[file.NameInArchive] = true - } - } - for dir := range dirs { - if !knownDirs[dir] { - entries = append(entries, File{FileInfo: implicitDirInfo{implicitDirEntry{path.Base(dir)}}, NameInArchive: dir}) + + innerFile, err := file.Open() + if err != nil { + return err } - } - sort.Slice(entries, func(i, j int) bool { - fi, fj := entries[i], entries[j] - di, ei, _ := split(fi.NameInArchive) - dj, ej, _ := split(fj.NameInArchive) + fsFile = closeBoth{File: innerFile, c: archiveFile} - if di != dj { - return di < dj + if decompressor != nil { + fsFile = closeBoth{fsFile, decompressor} } - return ei < ej - }) - return entries -} -// modified from zip.Reader openLookup -func search(name string, entries []File) *File { - dir, elem, _ := split(name) - i := sort.Search(len(entries), func(i int) bool { - idir, ielem, _ := split(entries[i].NameInArchive) - return idir > dir || idir == dir && ielem >= elem - }) - if i < len(entries) { - fname := entries[i].NameInArchive - if fname == name || len(fname) == len(name)+1 && fname[len(name)] == '/' && fname[:len(name)] == name { - return &entries[i] - } + return fs.SkipAll } - return nil -} -// modified from zip.Reader openReadDir -func openReadDir(dir string, entries []File) []fs.DirEntry { - i := sort.Search(len(entries), func(i int) bool { - idir, _, _ := split(entries[i].NameInArchive) - return idir >= dir - }) - j := sort.Search(len(entries), func(j int) bool { - jdir, _, _ := split(entries[j].NameInArchive) - return jdir > dir - }) - dirs := make([]fs.DirEntry, j-i) - for idx := range dirs { - dirs[idx] = fs.FileInfoToDirEntry(entries[i+idx]) - } - return dirs + // when we start the walk, we pass in a nil list of files to extract, since + // files may have a "." 
component in them, and the underlying format doesn't + // know about our file system semantics, so we need to filter ourselves (it's + // not significantly less efficient). + if ar, ok := f.Format.(Archive); ok { + // bypass the CompressedArchive format's opening of the decompressor, since + // we already did it because we need to keep it open after returning. + // "I BYPASSED THE COMPRESSOR!" -Rey + err = ar.Extraction.Extract(f.context(), inputStream, handler) + } else { + err = f.Format.Extract(f.context(), inputStream, handler) + } + if err != nil { + return nil, &fs.PathError{Op: "open", Path: name, Err: fmt.Errorf("extract: %w", err)} + } + if fsFile == nil { + return nil, &fs.PathError{Op: "open", Path: name, Err: fmt.Errorf("open %s: %w", name, fs.ErrNotExist)} + } + + return fsFile, nil } // Stat stats the named file from within the archive. If name is "." then // the archive file itself is statted and treated as a directory file. func (f ArchiveFS) Stat(name string) (fs.FileInfo, error) { if !fs.ValidPath(name) { - return nil, &fs.PathError{Op: "stat", Path: name, Err: fs.ErrInvalid} + return nil, &fs.PathError{Op: "stat", Path: name, Err: fmt.Errorf("%s: %w", name, fs.ErrInvalid)} } - // apply prefix if fs is rooted in a subtree - name = path.Join(f.Prefix, name) - if name == "." { if f.Path != "" { fileInfo, err := os.Stat(f.Path) @@ -494,6 +448,17 @@ func (f ArchiveFS) Stat(name string) (fs.FileInfo, error) { } } + // apply prefix if fs is rooted in a subtree + name = path.Join(f.Prefix, name) + + // if archive has already been indexed, simply use it + if f.contents != nil { + if info, ok := f.contents[name]; ok { + return info, nil + } + return nil, &fs.PathError{Op: "stat", Path: name, Err: fmt.Errorf("stat %s: %w", name, fs.ErrNotExist)} + } + var archiveFile *os.File var err error if f.Stream == nil { @@ -504,16 +469,14 @@ func (f ArchiveFS) Stat(name string) (fs.FileInfo, error) { defer archiveFile.Close() } - var ( - files []File - found bool - ) - handler := func(_ context.Context, file File) error { - file.NameInArchive = strings.Trim(file.NameInArchive, "/") - files = append(files, file) - if file.NameInArchive == name { - found = true - return errStopWalk + var result FileInfo + handler := func(ctx context.Context, file FileInfo) error { + if err := ctx.Err(); err != nil { + return err + } + if path.Clean(file.NameInArchive) == name { + result = file + return fs.SkipAll } return nil } @@ -521,32 +484,38 @@ func (f ArchiveFS) Stat(name string) (fs.FileInfo, error) { if f.Stream != nil { inputStream = io.NewSectionReader(f.Stream, 0, f.Stream.Size()) } - err = f.Format.Extract(f.context(), inputStream, []string{name}, handler) - if found { - err = nil - } - if err != nil { + err = f.Format.Extract(f.context(), inputStream, handler) + if err != nil && result.FileInfo == nil { return nil, err } - - if (len(files) == 0 && files[0].NameInArchive == name) || found { - return files[len(files)-1].FileInfo, nil - } - - files = fillImplicit(files) - file := search(name, files) - if file == nil { + if result.FileInfo == nil { return nil, fs.ErrNotExist } - return file.FileInfo, nil + return result.FileInfo, nil } -// ReadDir reads the named directory from within the archive. -func (f ArchiveFS) ReadDir(name string) ([]fs.DirEntry, error) { +// ReadDir reads the named directory from within the archive. If name is "." +// then the root of the archive content is listed. 
+func (f *ArchiveFS) ReadDir(name string) ([]fs.DirEntry, error) { if !fs.ValidPath(name) { return nil, &fs.PathError{Op: "readdir", Path: name, Err: fs.ErrInvalid} } + // apply prefix if fs is rooted in a subtree + name = path.Join(f.Prefix, name) + + // fs.WalkDir() calls ReadDir() once per directory, and for archives with + // lots of directories, that is very slow, since we have to traverse the + // entire archive in order to ensure that we got all the entries for a + // directory -- so we can fast-track this lookup if we've done the + // traversal already + if len(f.dirs) > 0 { + return f.dirs[name], nil + } + + f.contents = make(map[string]fs.FileInfo) + f.dirs = make(map[string][]fs.DirEntry) + var archiveFile *os.File var err error if f.Stream == nil { @@ -557,28 +526,72 @@ func (f ArchiveFS) ReadDir(name string) ([]fs.DirEntry, error) { defer archiveFile.Close() } - // apply prefix if fs is rooted in a subtree - name = path.Join(f.Prefix, name) + handler := func(ctx context.Context, file FileInfo) error { + if err := ctx.Err(); err != nil { + return err + } + + // can't always trust path names + file.NameInArchive = path.Clean(file.NameInArchive) + + // avoid infinite walk; apparently, creating a tar file in the target + // directory may result in an entry called "." in the archive; see #384 + if file.NameInArchive == "." { + return nil + } - // collect all files with prefix - var ( - files []File - foundFile bool - ) - handler := func(_ context.Context, file File) error { - file.NameInArchive = strings.Trim(file.NameInArchive, "/") - files = append(files, file) + // if the name being requested isn't a directory, return an error similar to + // what most OSes return from the readdir system call when given a non-dir if file.NameInArchive == name && !file.IsDir() { - foundFile = true - return errStopWalk + return &fs.PathError{Op: "readdir", Path: name, Err: errors.New("not a directory")} } - return nil - } - // handle special case of reading from root of archive - var filter []string - if name != "." { - filter = []string{name} + // index this file info for quick access + f.contents[file.NameInArchive] = file + + // this is a real directory; prefer its DirEntry over an implicit/fake one we may have created earlier; + // first try to find if it exists, and if so, replace the value; otherwise insert it in sorted position + if file.IsDir() { + dirEntry := fs.FileInfoToDirEntry(file) + idx, found := slices.BinarySearchFunc(f.dirs[path.Dir(file.NameInArchive)], dirEntry, func(a, b fs.DirEntry) int { + return strings.Compare(a.Name(), b.Name()) + }) + if found { + f.dirs[path.Dir(file.NameInArchive)][idx] = dirEntry + } else { + f.dirs[path.Dir(file.NameInArchive)] = slices.Insert(f.dirs[path.Dir(file.NameInArchive)], idx, dirEntry) + } + } + + // this loop looks like an abomination, but it's really quite simple: we're + // just iterating the directories of the path up to the root; i.e. we lob off + // the base (last component) of the path until no separators remain, i.e. 
only + // one component remains -- then loop again to make sure it's not a duplicate + for dir, base := path.Dir(file.NameInArchive), path.Base(file.NameInArchive); ; dir, base = path.Dir(dir), path.Base(dir) { + if err := ctx.Err(); err != nil { + return err + } + + var dirInfo fs.DirEntry = implicitDirInfo{implicitDirEntry{base}} + + // we are "filling in" any directories that could potentially be only implicit, + // and since a nested directory can have more than 1 item, we need to prevent + // duplication; for example: given a/b/c and a/b/d, we need to avoid adding + // an entry for "b" twice within "a" -- hence we search for it first, and if + // it doesn't already exist, we insert it in sorted position + idx, found := slices.BinarySearchFunc(f.dirs[dir], dirInfo, func(a, b fs.DirEntry) int { + return strings.Compare(a.Name(), b.Name()) + }) + if !found { + f.dirs[dir] = slices.Insert(f.dirs[dir], idx, dirInfo) + } + + if dir == "." { + break + } + } + + return nil } var inputStream io.Reader = archiveFile @@ -586,30 +599,18 @@ func (f ArchiveFS) ReadDir(name string) ([]fs.DirEntry, error) { inputStream = io.NewSectionReader(f.Stream, 0, f.Stream.Size()) } - err = f.Format.Extract(f.context(), inputStream, filter, handler) - if foundFile { - return nil, &fs.PathError{Op: "readdir", Path: name, Err: errors.New("not a dir")} - } + err = f.Format.Extract(f.context(), inputStream, handler) if err != nil { - return nil, err - } - - // always find all implicit directories - files = fillImplicit(files) - // and return early for dot file - if name == "." { - return openReadDir(name, files), nil + // these being non-nil implies that we have indexed the archive, + // but if an error occurred, we likely only got part of the way + // through and our index is incomplete, and we'd have to re-walk + // the whole thing anyway; so reset these to nil to avoid bugs + f.dirs = nil + f.contents = nil + return nil, fmt.Errorf("extract: %w", err) } - file := search(name, files) - if file == nil { - return nil, fs.ErrNotExist - } - - if !file.IsDir() { - return nil, &fs.PathError{Op: "readdir", Path: name, Err: errors.New("not a dir")} - } - return openReadDir(name, files), nil + return f.dirs[name], nil } // Sub returns an FS corresponding to the subtree rooted at dir. @@ -624,6 +625,11 @@ func (f *ArchiveFS) Sub(dir string) (fs.FS, error) { if !info.IsDir() { return nil, fmt.Errorf("%s is not a directory", dir) } + // result is the same as what we're starting with, except + // we indicate a path prefix to be used for all operations; + // the reason we don't append to the Path field directly + // is because the input might be a stream rather than a + // path on disk, and the Prefix field is applied on both result := f result.Prefix = dir return result, nil @@ -693,44 +699,18 @@ func pathWithoutTopDir(fpath string) string { return fpath[slashIdx+1:] } -// errStopWalk is an arbitrary error value, since returning -// any error (other than fs.SkipDir) will stop a walk. We -// use this as we may only want 1 file from an extraction, -// even if that file is a directory and would otherwise be -// traversed during the walk. -var errStopWalk = fmt.Errorf("stop walk") - -type fakeArchiveFile struct{} - -func (f fakeArchiveFile) Stat() (fs.FileInfo, error) { - return implicitDirInfo{ - implicitDirEntry{name: "."}, - }, nil -} -func (f fakeArchiveFile) Read([]byte) (int, error) { return 0, io.EOF } -func (f fakeArchiveFile) Close() error { return nil } - // dirFile implements the fs.ReadDirFile interface. 
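The sorted-insert pattern ReadDir uses above (slices.BinarySearchFunc followed by slices.Insert) is easy to illustrate in isolation; this standalone sketch uses plain strings in place of fs.DirEntry values, and shows how repeated implicit parents (for example, "b" implied by both a/b/c and a/b/d) are inserted only once:

```go
package main

import (
	"fmt"
	"slices"
	"strings"
)

// insertSorted keeps entries sorted and duplicate-free, mirroring the
// invariant ArchiveFS maintains for each directory's []fs.DirEntry.
func insertSorted(entries []string, name string) []string {
	idx, found := slices.BinarySearchFunc(entries, name, strings.Compare)
	if found {
		return entries // e.g., an implicit parent dir encountered twice
	}
	return slices.Insert(entries, idx, name)
}

func main() {
	var listing []string
	// a/b/c and a/b/d both imply a directory "b"; it is added once
	for _, name := range []string{"c", "b", "d", "b"} {
		listing = insertSorted(listing, name)
	}
	fmt.Println(listing) // [b c d]
}
```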
type dirFile struct { - extractedFile - - // TODO: We could probably be more memory-efficient by not loading - // all the entries at once and then "faking" the paging for ReadDir(). - // Instead, we could maybe store a reference to the parent archive FS, - // then walk it each time ReadDir is called, skipping entriesRead - // files, then continuing the listing, until n are listed. But that - // might be kinda messy and a lot of work, so I leave it for a future - // optimization if needed. + info fs.FileInfo entries []fs.DirEntry - entriesRead int + entriesRead int // used for paging with ReadDir(n) } -// If this represents the root of the archive, we use the archive's -// FileInfo which says it's a file, not a directory; the whole point -// of this package is to treat the archive as a directory, so always -// return true in our case. -func (dirFile) IsDir() bool { return true } +func (dirFile) Read([]byte) (int, error) { return 0, errors.New("cannot read a directory file") } +func (df dirFile) Stat() (fs.FileInfo, error) { return df.info, nil } +func (dirFile) Close() error { return nil } +// ReadDir implements [fs.ReadDirFile]. func (df *dirFile) ReadDir(n int) ([]fs.DirEntry, error) { if n <= 0 { return df.entries, nil @@ -759,45 +739,49 @@ func (dirFileInfo) Size() int64 { return 0 } func (info dirFileInfo) Mode() fs.FileMode { return info.FileInfo.Mode() | fs.ModeDir } func (dirFileInfo) IsDir() bool { return true } -// extractedFile implements fs.File, thus it represents an "opened" file, -// which is slightly different from our File type which represents a file -// that possibly may be opened. If the file is actually opened, this type -// ensures that the parent archive is closed when this file from within it -// is also closed. -type extractedFile struct { - File - - // Set these fields if a "regular file" which has actual content - // that can be read, i.e. a file that is open for reading. - // ReadCloser should be the file's reader, and parentArchive is - // a reference to the archive the files comes out of. - // If parentArchive is set, it will also be closed along with - // the file when Close() is called. +// fileInArchive represents a file that is opened from within an archive. +// It implements fs.File. +type fileInArchive struct { io.ReadCloser - parentArchive io.Closer + info fs.FileInfo } -// Close closes the the current file if opened and -// the parent archive if specified. This is a no-op -// for directories which do not set those fields. -func (ef extractedFile) Close() error { - if ef.parentArchive != nil { - if err := ef.parentArchive.Close(); err != nil { - return err +func (af fileInArchive) Stat() (fs.FileInfo, error) { return af.info, nil } + +// closeBoth closes both the file and an associated +// closer, such as a (de)compressor that wraps the +// reading/writing of the file. See issue #365. If a +// better solution is found, I'd probably prefer that. +type closeBoth struct { + fs.File + c io.Closer // usually the archive or the decompressor +} + +// Close closes both the file and the associated closer. It always calls +// Close() on both, but if multiple errors occur they are wrapped together. 
+func (dc closeBoth) Close() error { + var err error + if dc.File != nil { + if err2 := dc.File.Close(); err2 != nil { + err = fmt.Errorf("closing file: %w", err2) } } - if ef.ReadCloser != nil { - return ef.ReadCloser.Close() + if dc.c != nil { + if err2 := dc.c.Close(); err2 != nil { + if err == nil { + err = fmt.Errorf("closing closer: %w", err2) + } else { + err = fmt.Errorf("%w; additionally, closing closer: %w", err, err2) + } + } } - return nil + return err } // implicitDirEntry represents a directory that does // not actually exist in the archive but is inferred // from the paths of actual files in the archive. -type implicitDirEntry struct { - name string -} +type implicitDirEntry struct{ name string } func (e implicitDirEntry) Name() string { return e.name } func (implicitDirEntry) IsDir() bool { return true } @@ -811,22 +795,16 @@ func (e implicitDirEntry) Info() (fs.FileInfo, error) { // not contain actual entries for a directory, but we need to // pretend it exists so its contents can be discovered and // traversed. -type implicitDirInfo struct { - implicitDirEntry -} +type implicitDirInfo struct{ implicitDirEntry } func (d implicitDirInfo) Name() string { return d.name } func (implicitDirInfo) Size() int64 { return 0 } func (d implicitDirInfo) Mode() fs.FileMode { return d.Type() } func (implicitDirInfo) ModTime() time.Time { return time.Time{} } -func (implicitDirInfo) Sys() interface{} { return nil } +func (implicitDirInfo) Sys() any { return nil } // Interface guards var ( - _ fs.ReadDirFS = (*DirFS)(nil) - _ fs.StatFS = (*DirFS)(nil) - _ fs.SubFS = (*DirFS)(nil) - _ fs.ReadDirFS = (*FileFS)(nil) _ fs.StatFS = (*FileFS)(nil) diff --git a/vendor/github.com/mholt/archiver/v4/gz.go b/vendor/github.com/mholt/archiver/v4/gz.go index e747d030..e8b3f98d 100644 --- a/vendor/github.com/mholt/archiver/v4/gz.go +++ b/vendor/github.com/mholt/archiver/v4/gz.go @@ -2,6 +2,7 @@ package archiver import ( "bytes" + "context" "io" "strings" @@ -20,18 +21,22 @@ type Gz struct { // than no compression. CompressionLevel int + // DisableMultistream controls whether the reader supports multistream files. + // See https://pkg.go.dev/compress/gzip#example-Reader.Multistream + DisableMultistream bool + // Use a fast parallel Gzip implementation. This is only // effective for large streams (about 1 MB or greater). 
Multithreaded bool
 }
 
-func (Gz) Name() string { return ".gz" }
+func (Gz) Extension() string { return ".gz" }
 
-func (gz Gz) Match(filename string, stream io.Reader) (MatchResult, error) {
+func (gz Gz) Match(_ context.Context, filename string, stream io.Reader) (MatchResult, error) {
 	var mr MatchResult
 
 	// match filename
-	if strings.Contains(strings.ToLower(filename), gz.Name()) {
+	if strings.Contains(strings.ToLower(filename), gz.Extension()) {
 		mr.ByName = true
 	}
 
@@ -65,14 +70,19 @@ func (gz Gz) OpenWriter(w io.Writer) (io.WriteCloser, error) {
 }
 
 func (gz Gz) OpenReader(r io.Reader) (io.ReadCloser, error) {
-	var rc io.ReadCloser
-	var err error
 	if gz.Multithreaded {
-		rc, err = pgzip.NewReader(r)
-	} else {
-		rc, err = gzip.NewReader(r)
+		gzR, err := pgzip.NewReader(r)
+		if gzR != nil && gz.DisableMultistream {
+			gzR.Multistream(false)
+		}
+		return gzR, err
+	}
+
+	gzR, err := gzip.NewReader(r)
+	if gzR != nil && gz.DisableMultistream {
+		gzR.Multistream(false)
 	}
-	return rc, err
+	return gzR, err
 }
 
 // magic number at the beginning of gzip files
diff --git a/vendor/github.com/mholt/archiver/v4/interfaces.go b/vendor/github.com/mholt/archiver/v4/interfaces.go
index bfc53163..fd817864 100644
--- a/vendor/github.com/mholt/archiver/v4/interfaces.go
+++ b/vendor/github.com/mholt/archiver/v4/interfaces.go
@@ -5,10 +5,12 @@ import (
 	"io"
 )
 
-// Format represents either an archive or compression format.
+// Format represents a way of getting data out of something else.
+// A format usually represents compression or an archive (or both).
 type Format interface {
-	// Name returns the name of the format.
-	Name() string
+	// Extension returns the conventional file extension for this
+	// format.
+	Extension() string
 
 	// Match returns true if the given name/stream is recognized.
 	// One of the arguments is optional: filename might be empty
@@ -21,7 +23,7 @@ type Format interface {
 	// preserve the stream through matching, you should either
 	// buffer what is read by Match, or seek to the last position
 	// before Match was called.
-	Match(filename string, stream io.Reader) (MatchResult, error)
+	Match(ctx context.Context, filename string, stream io.Reader) (MatchResult, error)
 }
 
 // Compression is a compression format with both compress and decompress methods.
@@ -31,10 +33,15 @@ type Compression interface {
 	Decompressor
 }
 
-// Archival is an archival format with both archive and extract methods.
+// Archival is an archival format that can create/write archives.
 type Archival interface {
 	Format
 	Archiver
+}
+
+// Extraction is an archival format that can extract from (read) archives.
+type Extraction interface {
+	Format
 	Extractor
 }
 
@@ -57,13 +64,13 @@ type Archiver interface {
 	// Archive writes an archive file to output with the given files.
 	//
 	// Context cancellation must be honored.
-	Archive(ctx context.Context, output io.Writer, files []File) error
+	Archive(ctx context.Context, output io.Writer, files []FileInfo) error
 }
 
 // ArchiveAsyncJob contains a File to be archived and a channel that
 // the result of the archiving should be returned on.
 type ArchiveAsyncJob struct {
-	File   File
+	File   FileInfo
 	Result chan<- error
 }
 
@@ -83,20 +90,22 @@ type ArchiverAsync interface {
 
 // Extractor can extract files from an archive.
 type Extractor interface {
-	// Extract reads the files at pathsInArchive from sourceArchive.
-	// If pathsInArchive is nil, all files are extracted without discretion.
-	// If pathsInArchive is empty, no files are extracted.
- // If a path refers to a directory, all files within it are extracted. - // Extracted files are passed to the handleFile callback for handling. + // Extract walks entries in the archive and calls handleFile for each + // entry in the archive. + // + // Any files opened in the FileHandler should be closed when it returns, + // as there is no guarantee the files can be read outside the handler + // or after the walk has proceeded to the next file. // // Context cancellation must be honored. - Extract(ctx context.Context, sourceArchive io.Reader, pathsInArchive []string, handleFile FileHandler) error + Extract(ctx context.Context, archive io.Reader, handleFile FileHandler) error } // Inserter can insert files into an existing archive. +// EXPERIMENTAL: This API is subject to change. type Inserter interface { // Insert inserts the files into archive. // // Context cancellation must be honored. - Insert(ctx context.Context, archive io.ReadWriteSeeker, files []File) error + Insert(ctx context.Context, archive io.ReadWriteSeeker, files []FileInfo) error } diff --git a/vendor/github.com/mholt/archiver/v4/lz4.go b/vendor/github.com/mholt/archiver/v4/lz4.go index aaa22a54..7425ad2a 100644 --- a/vendor/github.com/mholt/archiver/v4/lz4.go +++ b/vendor/github.com/mholt/archiver/v4/lz4.go @@ -2,6 +2,7 @@ package archiver import ( "bytes" + "context" "io" "strings" @@ -17,13 +18,13 @@ type Lz4 struct { CompressionLevel int } -func (Lz4) Name() string { return ".lz4" } +func (Lz4) Extension() string { return ".lz4" } -func (lz Lz4) Match(filename string, stream io.Reader) (MatchResult, error) { +func (lz Lz4) Match(_ context.Context, filename string, stream io.Reader) (MatchResult, error) { var mr MatchResult // match filename - if strings.Contains(strings.ToLower(filename), lz.Name()) { + if strings.Contains(strings.ToLower(filename), lz.Extension()) { mr.ByName = true } diff --git a/vendor/github.com/mholt/archiver/v4/lzip.go b/vendor/github.com/mholt/archiver/v4/lzip.go new file mode 100644 index 00000000..1cbffa50 --- /dev/null +++ b/vendor/github.com/mholt/archiver/v4/lzip.go @@ -0,0 +1,54 @@ +package archiver + +import ( + "bytes" + "context" + "io" + "path/filepath" + "strings" + + "github.com/sorairolake/lzip-go" +) + +func init() { + RegisterFormat(Lzip{}) +} + +// Lzip facilitates lzip compression. 
+type Lzip struct{} + +func (Lzip) Extension() string { return ".lz" } + +func (lz Lzip) Match(_ context.Context, filename string, stream io.Reader) (MatchResult, error) { + var mr MatchResult + + // match filename + if filepath.Ext(strings.ToLower(filename)) == lz.Extension() { + mr.ByName = true + } + + // match file header + buf, err := readAtMost(stream, len(lzipHeader)) + if err != nil { + return mr, err + } + mr.ByStream = bytes.Equal(buf, lzipHeader) + + return mr, nil +} + +func (Lzip) OpenWriter(w io.Writer) (io.WriteCloser, error) { + return lzip.NewWriter(w), nil +} + +func (Lzip) OpenReader(r io.Reader) (io.ReadCloser, error) { + lzr, err := lzip.NewReader(r) + if err != nil { + return nil, err + } + return io.NopCloser(lzr), err +} + +// magic number at the beginning of lzip files +// https://datatracker.ietf.org/doc/html/draft-diaz-lzip-09#section-2 +var lzipHeader = []byte("LZIP") diff --git a/vendor/github.com/mholt/archiver/v4/rar.go b/vendor/github.com/mholt/archiver/v4/rar.go index 61e55e57..8ca559e6 100644 --- a/vendor/github.com/mholt/archiver/v4/rar.go +++ b/vendor/github.com/mholt/archiver/v4/rar.go @@ -30,13 +30,13 @@ type Rar struct { Password string } -func (Rar) Name() string { return ".rar" } +func (Rar) Extension() string { return ".rar" } -func (r Rar) Match(filename string, stream io.Reader) (MatchResult, error) { +func (r Rar) Match(_ context.Context, filename string, stream io.Reader) (MatchResult, error) { var mr MatchResult // match filename - if strings.Contains(strings.ToLower(filename), r.Name()) { + if strings.Contains(strings.ToLower(filename), r.Extension()) { mr.ByName = true } @@ -56,12 +56,9 @@ func (r Rar) Match(filename string, stream io.Reader) (MatchResult, error) { return mr, nil } -// Archive is not implemented for RAR, but the method exists so that Rar satisfies the ArchiveFormat interface. -func (r Rar) Archive(_ context.Context, _ io.Writer, _ []File) error { - return fmt.Errorf("not implemented because RAR is a proprietary format") -} +// Archive is not implemented for RAR because it is patent-encumbered. 
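With the pathsInArchive parameter gone from Extract (here and in the other formats below), any filtering or early termination now happens inside the FileHandler itself. A minimal sketch of that pattern, assuming a local test.rar, which lists entries and stops after ten via fs.SkipAll:

```go
package main

import (
	"context"
	"fmt"
	"io/fs"
	"log"
	"os"

	archiver "github.com/mholt/archiver/v4"
)

func main() {
	f, err := os.Open("test.rar") // assumed input archive
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	count := 0
	handler := func(ctx context.Context, fi archiver.FileInfo) error {
		fmt.Println(fi.NameInArchive)
		count++
		if count >= 10 {
			return fs.SkipAll // per the new contract, this stops the walk
		}
		return nil
	}
	if err := (archiver.Rar{}).Extract(context.Background(), f, handler); err != nil {
		log.Fatal(err)
	}
}
```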
-func (r Rar) Extract(ctx context.Context, sourceArchive io.Reader, pathsInArchive []string, handleFile FileHandler) error { +func (r Rar) Extract(ctx context.Context, sourceArchive io.Reader, handleFile FileHandler) error { var options []rardecode.Option if r.Password != "" { options = append(options, rardecode.Password(r.Password)) @@ -91,22 +88,24 @@ func (r Rar) Extract(ctx context.Context, sourceArchive io.Reader, pathsInArchiv } return err } - if !fileIsIncluded(pathsInArchive, hdr.Name) { - continue - } if fileIsIncluded(skipDirs, hdr.Name) { continue } - file := File{ - FileInfo: rarFileInfo{hdr}, + info := rarFileInfo{hdr} + file := FileInfo{ + FileInfo: info, Header: hdr, NameInArchive: hdr.Name, - Open: func() (io.ReadCloser, error) { return io.NopCloser(rr), nil }, + Open: func() (fs.File, error) { + return fileInArchive{io.NopCloser(rr), info}, nil + }, } err = handleFile(ctx, file) - if errors.Is(err, fs.SkipDir) { + if errors.Is(err, fs.SkipAll) { + break + } else if errors.Is(err, fs.SkipDir) { // if a directory, skip this path; if a file, skip the folder path dirPath := hdr.Name if !hdr.IsDir { @@ -131,9 +130,12 @@ func (rfi rarFileInfo) Size() int64 { return rfi.fh.UnPackedSize } func (rfi rarFileInfo) Mode() os.FileMode { return rfi.fh.Mode() } func (rfi rarFileInfo) ModTime() time.Time { return rfi.fh.ModificationTime } func (rfi rarFileInfo) IsDir() bool { return rfi.fh.IsDir } -func (rfi rarFileInfo) Sys() interface{} { return nil } +func (rfi rarFileInfo) Sys() any { return nil } var ( rarHeaderV1_5 = []byte("Rar!\x1a\x07\x00") // v1.5 rarHeaderV5_0 = []byte("Rar!\x1a\x07\x01\x00") // v5.0 ) + +// Interface guard +var _ Extractor = Rar{} diff --git a/vendor/github.com/mholt/archiver/v4/sz.go b/vendor/github.com/mholt/archiver/v4/sz.go index 9d10604a..8a926b7f 100644 --- a/vendor/github.com/mholt/archiver/v4/sz.go +++ b/vendor/github.com/mholt/archiver/v4/sz.go @@ -2,6 +2,7 @@ package archiver import ( "bytes" + "context" "io" "strings" @@ -15,13 +16,13 @@ func init() { // Sz facilitates Snappy compression. type Sz struct{} -func (sz Sz) Name() string { return ".sz" } +func (sz Sz) Extension() string { return ".sz" } -func (sz Sz) Match(filename string, stream io.Reader) (MatchResult, error) { +func (sz Sz) Match(_ context.Context, filename string, stream io.Reader) (MatchResult, error) { var mr MatchResult // match filename - if strings.Contains(strings.ToLower(filename), sz.Name()) { + if strings.Contains(strings.ToLower(filename), sz.Extension()) { mr.ByName = true } diff --git a/vendor/github.com/mholt/archiver/v4/tar.go b/vendor/github.com/mholt/archiver/v4/tar.go index ce719695..d84fed43 100644 --- a/vendor/github.com/mholt/archiver/v4/tar.go +++ b/vendor/github.com/mholt/archiver/v4/tar.go @@ -17,19 +17,22 @@ func init() { } type Tar struct { + // If true, preserve only numeric user and group id + NumericUIDGID bool + // If true, errors encountered during reading or writing // a file within an archive will be logged and the // operation will continue on remaining files. 
ContinueOnError bool } -func (Tar) Name() string { return ".tar" } +func (Tar) Extension() string { return ".tar" } -func (t Tar) Match(filename string, stream io.Reader) (MatchResult, error) { +func (t Tar) Match(_ context.Context, filename string, stream io.Reader) (MatchResult, error) { var mr MatchResult // match filename - if strings.Contains(strings.ToLower(filename), t.Name()) { + if strings.Contains(strings.ToLower(filename), t.Extension()) { mr.ByName = true } @@ -43,7 +46,7 @@ func (t Tar) Match(filename string, stream io.Reader) (MatchResult, error) { return mr, nil } -func (t Tar) Archive(ctx context.Context, output io.Writer, files []File) error { +func (t Tar) Archive(ctx context.Context, output io.Writer, files []FileInfo) error { tw := tar.NewWriter(output) defer tw.Close() @@ -71,7 +74,7 @@ func (t Tar) ArchiveAsync(ctx context.Context, output io.Writer, jobs <-chan Arc return nil } -func (Tar) writeFileToArchive(ctx context.Context, tw *tar.Writer, file File) error { +func (t Tar) writeFileToArchive(ctx context.Context, tw *tar.Writer, file FileInfo) error { if err := ctx.Err(); err != nil { return err // honor context cancellation } @@ -81,6 +84,13 @@ func (Tar) writeFileToArchive(ctx context.Context, tw *tar.Writer, file File) er return fmt.Errorf("file %s: creating header: %w", file.NameInArchive, err) } hdr.Name = file.NameInArchive // complete path, since FileInfoHeader() only has base name + if hdr.Name == "" { + hdr.Name = file.Name() // assume base name of file I guess + } + if t.NumericUIDGID { + hdr.Uname = "" + hdr.Gname = "" + } if err := tw.WriteHeader(hdr); err != nil { return fmt.Errorf("file %s: writing header: %w", file.NameInArchive, err) @@ -99,7 +109,7 @@ func (Tar) writeFileToArchive(ctx context.Context, tw *tar.Writer, file File) er return nil } -func (t Tar) Insert(ctx context.Context, into io.ReadWriteSeeker, files []File) error { +func (t Tar) Insert(ctx context.Context, into io.ReadWriteSeeker, files []FileInfo) error { // Tar files may end with some, none, or a lot of zero-byte padding. The spec says // it should end with two 512-byte trailer records consisting solely of null/0 // bytes: https://www.gnu.org/software/tar/manual/html_node/Standard.html. 
However, @@ -169,7 +179,7 @@ func (t Tar) Insert(ctx context.Context, into io.ReadWriteSeeker, files []File) return nil } -func (t Tar) Extract(ctx context.Context, sourceArchive io.Reader, pathsInArchive []string, handleFile FileHandler) error { +func (t Tar) Extract(ctx context.Context, sourceArchive io.Reader, handleFile FileHandler) error { tr := tar.NewReader(sourceArchive) // important to initialize to non-nil, empty value due to how fileIsIncluded works @@ -191,9 +201,6 @@ func (t Tar) Extract(ctx context.Context, sourceArchive io.Reader, pathsInArchiv } return err } - if !fileIsIncluded(pathsInArchive, hdr.Name) { - continue - } if fileIsIncluded(skipDirs, hdr.Name) { continue } @@ -202,16 +209,27 @@ func (t Tar) Extract(ctx context.Context, sourceArchive io.Reader, pathsInArchiv continue } - file := File{ - FileInfo: hdr.FileInfo(), + info := hdr.FileInfo() + file := FileInfo{ + FileInfo: info, Header: hdr, NameInArchive: hdr.Name, LinkTarget: hdr.Linkname, - Open: func() (io.ReadCloser, error) { return io.NopCloser(tr), nil }, + Open: func() (fs.File, error) { + return fileInArchive{io.NopCloser(tr), info}, nil + }, } err = handleFile(ctx, file) - if errors.Is(err, fs.SkipDir) { + if errors.Is(err, fs.SkipAll) { + // At first, I wasn't sure if fs.SkipAll implied that the rest of the entries + // should still be iterated and just "skipped" (i.e. no-ops) or if the walk + // should stop; both have the same net effect, one is just less efficient... + // apparently the name of fs.StopWalk was the preferred name, but it still + // became fs.SkipAll because of semantics with documentation; see + // https://github.com/golang/go/issues/47209 -- anyway, the walk should stop. + break + } else if errors.Is(err, fs.SkipDir) { // if a directory, skip this path; if a file, skip the folder path dirPath := hdr.Name if hdr.Typeflag != tar.TypeDir { diff --git a/vendor/github.com/mholt/archiver/v4/xz.go b/vendor/github.com/mholt/archiver/v4/xz.go index 4e1b6b41..edb61373 100644 --- a/vendor/github.com/mholt/archiver/v4/xz.go +++ b/vendor/github.com/mholt/archiver/v4/xz.go @@ -2,6 +2,7 @@ package archiver import ( "bytes" + "context" "io" "strings" @@ -16,13 +17,13 @@ func init() { // Xz facilitates xz compression. 
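As a usage sketch for the new Tar.NumericUIDGID option above (which, per the tar.go hunk, simply blanks Uname and Gname so only the numeric ids are written), assuming a local hello.txt and that FilesFromDisk keeps its map-of-disk-path-to-archive-name signature:

```go
package main

import (
	"context"
	"log"
	"os"

	archiver "github.com/mholt/archiver/v4"
)

func main() {
	// map of disk path -> name in archive (assumed helper signature)
	files, err := archiver.FilesFromDisk(nil, map[string]string{
		"hello.txt": "hello.txt",
	})
	if err != nil {
		log.Fatal(err)
	}

	out, err := os.Create("out.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	// Uname/Gname are cleared per file; only numeric UID/GID are stored.
	format := archiver.Tar{NumericUIDGID: true}
	if err := format.Archive(context.Background(), out, files); err != nil {
		log.Fatal(err)
	}
}
```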
type Xz struct{} -func (Xz) Name() string { return ".xz" } +func (Xz) Extension() string { return ".xz" } -func (x Xz) Match(filename string, stream io.Reader) (MatchResult, error) { +func (x Xz) Match(_ context.Context, filename string, stream io.Reader) (MatchResult, error) { var mr MatchResult // match filename - if strings.Contains(strings.ToLower(filename), x.Name()) { + if strings.Contains(strings.ToLower(filename), x.Extension()) { mr.ByName = true } diff --git a/vendor/github.com/mholt/archiver/v4/zip.go b/vendor/github.com/mholt/archiver/v4/zip.go index 421fe6ec..1de5b516 100644 --- a/vendor/github.com/mholt/archiver/v4/zip.go +++ b/vendor/github.com/mholt/archiver/v4/zip.go @@ -11,6 +11,8 @@ import ( "path" "strings" + szip "github.com/STARRY-S/zip" + "github.com/dsnet/compress/bzip2" "github.com/klauspost/compress/zip" "github.com/klauspost/compress/zstd" @@ -81,13 +83,13 @@ type Zip struct { TextEncoding string } -func (z Zip) Name() string { return ".zip" } +func (z Zip) Extension() string { return ".zip" } -func (z Zip) Match(filename string, stream io.Reader) (MatchResult, error) { +func (z Zip) Match(_ context.Context, filename string, stream io.Reader) (MatchResult, error) { var mr MatchResult // match filename - if strings.Contains(strings.ToLower(filename), z.Name()) { + if strings.Contains(strings.ToLower(filename), z.Extension()) { mr.ByName = true } @@ -101,7 +103,7 @@ func (z Zip) Match(filename string, stream io.Reader) (MatchResult, error) { return mr, nil } -func (z Zip) Archive(ctx context.Context, output io.Writer, files []File) error { +func (z Zip) Archive(ctx context.Context, output io.Writer, files []FileInfo) error { zw := zip.NewWriter(output) defer zw.Close() @@ -127,7 +129,7 @@ func (z Zip) ArchiveAsync(ctx context.Context, output io.Writer, jobs <-chan Arc return nil } -func (z Zip) archiveOneFile(ctx context.Context, zw *zip.Writer, idx int, file File) error { +func (z Zip) archiveOneFile(ctx context.Context, zw *zip.Writer, idx int, file FileInfo) error { if err := ctx.Err(); err != nil { return err // honor context cancellation } @@ -137,6 +139,9 @@ func (z Zip) archiveOneFile(ctx context.Context, zw *zip.Writer, idx int, file F return fmt.Errorf("getting info for file %d: %s: %w", idx, file.Name(), err) } hdr.Name = file.NameInArchive // complete path, since FileInfoHeader() only has base name + if hdr.Name == "" { + hdr.Name = file.Name() // assume base name of file I guess + } // customize header based on file properties if file.IsDir() { @@ -152,6 +157,8 @@ func (z Zip) archiveOneFile(ctx context.Context, zw *zip.Writer, idx int, file F } else { hdr.Method = z.Compression } + } else { + hdr.Method = z.Compression } w, err := zw.CreateHeader(hdr) @@ -176,7 +183,7 @@ func (z Zip) archiveOneFile(ctx context.Context, zw *zip.Writer, idx int, file F // the interface because we figure you can Read() from anything you can ReadAt() or Seek() // with. Due to the nature of the zip archive format, if sourceArchive is not an io.Seeker // and io.ReaderAt, an error is returned. 
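Given the io.ReaderAt/io.Seeker constraint described above, a zip arriving on a non-seekable source (a network stream, say) has to be staged somewhere seekable before extraction; one minimal approach, assuming the whole archive fits in memory, is to buffer it and wrap it in a bytes.Reader:

```go
package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"log"
	"os"

	archiver "github.com/mholt/archiver/v4"
)

func main() {
	var stream io.Reader = os.Stdin // stand-in for any non-seekable source

	// bytes.Reader provides both io.ReaderAt and io.Seeker, which
	// satisfies the seekReaderAt requirement of Zip.Extract.
	buf, err := io.ReadAll(stream)
	if err != nil {
		log.Fatal(err)
	}
	err = (archiver.Zip{}).Extract(context.Background(), bytes.NewReader(buf),
		func(ctx context.Context, fi archiver.FileInfo) error {
			fmt.Println(fi.NameInArchive)
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}
}
```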
-func (z Zip) Extract(ctx context.Context, sourceArchive io.Reader, pathsInArchive []string, handleFile FileHandler) error {
+func (z Zip) Extract(ctx context.Context, sourceArchive io.Reader, handleFile FileHandler) error {
 	sra, ok := sourceArchive.(seekReaderAt)
 	if !ok {
 		return fmt.Errorf("input type must be an io.ReaderAt and io.Seeker because of zip format constraints")
@@ -204,22 +211,28 @@ func (z Zip) Extract(ctx context.Context, sourceArchive io.Reader, pathsInArchiv
 		// ensure filename and comment are UTF-8 encoded (issue #147 and PR #305)
 		z.decodeText(&f.FileHeader)
 
-		if !fileIsIncluded(pathsInArchive, f.Name) {
-			continue
-		}
 		if fileIsIncluded(skipDirs, f.Name) {
 			continue
 		}
 
-		file := File{
-			FileInfo:      f.FileInfo(),
+		info := f.FileInfo()
+		file := FileInfo{
+			FileInfo:      info,
 			Header:        f.FileHeader,
 			NameInArchive: f.Name,
-			Open:          func() (io.ReadCloser, error) { return f.Open() },
+			Open: func() (fs.File, error) {
+				openedFile, err := f.Open()
+				if err != nil {
+					return nil, err
+				}
+				return fileInArchive{openedFile, info}, nil
+			},
 		}
 
 		err := handleFile(ctx, file)
-		if errors.Is(err, fs.SkipDir) {
+		if errors.Is(err, fs.SkipAll) {
+			break
+		} else if errors.Is(err, fs.SkipDir) {
 			// if a directory, skip this path; if a file, skip the folder path
 			dirPath := f.Name
 			if !file.IsDir() {
@@ -256,25 +269,69 @@ func (z Zip) decodeText(hdr *zip.FileHeader) {
 	}
 }
 
-type seekReaderAt interface {
-	io.ReaderAt
-	io.Seeker
-}
-
-func streamSizeBySeeking(s io.Seeker) (int64, error) {
-	currentPosition, err := s.Seek(0, io.SeekCurrent)
-	if err != nil {
-		return 0, fmt.Errorf("getting current offset: %w", err)
-	}
-	maxPosition, err := s.Seek(0, io.SeekEnd)
+// Insert appends the listed files into the provided Zip archive stream.
+func (z Zip) Insert(ctx context.Context, into io.ReadWriteSeeker, files []FileInfo) error {
+	// following very simple example at https://github.com/STARRY-S/zip?tab=readme-ov-file#usage
+	zu, err := szip.NewUpdater(into)
 	if err != nil {
-		return 0, fmt.Errorf("fast-forwarding to end: %w", err)
+		return err
 	}
-	_, err = s.Seek(currentPosition, io.SeekStart)
-	if err != nil {
-		return 0, fmt.Errorf("returning to prior offset %d: %w", currentPosition, err)
+	defer zu.Close()
+
+	for idx, file := range files {
+		if err := ctx.Err(); err != nil {
+			return err // honor context cancellation
+		}
+
+		hdr, err := szip.FileInfoHeader(file)
+		if err != nil {
+			return fmt.Errorf("getting info for file %d: %s: %w", idx, file.NameInArchive, err)
+		}
+		hdr.Name = file.NameInArchive // complete path, since FileInfoHeader() only has base name
+		if hdr.Name == "" {
+			hdr.Name = file.Name() // assume base name of file I guess
+		}
+
+		// customize header based on file properties
+		if file.IsDir() {
+			if !strings.HasSuffix(hdr.Name, "/") {
+				hdr.Name += "/" // required
+			}
+			hdr.Method = zip.Store
+		} else if z.SelectiveCompression {
+			// only enable compression on compressible files
+			ext := strings.ToLower(path.Ext(hdr.Name))
+			if _, ok := compressedFormats[ext]; ok {
+				hdr.Method = zip.Store
+			} else {
+				hdr.Method = z.Compression
+			}
+		}
+
+		w, err := zu.AppendHeaderAt(hdr, -1)
+		if err != nil {
+			return fmt.Errorf("inserting file header: %d: %s: %w", idx, file.Name(), err)
+		}
+
+		// directories have no file body
+		if file.IsDir() {
+			return nil
+		}
+		if err := openAndCopyFile(file, w); err != nil {
+			if z.ContinueOnError && ctx.Err() == nil {
+				log.Printf("[ERROR] appending file %d into archive: %s: %v", idx, file.Name(), err)
+				continue
+			}
+			return fmt.Errorf("copying inserted file %d: 
%s: %w", idx, file.Name(), err) + } } - return maxPosition, nil + + return nil +} + +type seekReaderAt interface { + io.ReaderAt + io.Seeker } // Additional compression methods not offered by archive/zip. diff --git a/vendor/github.com/mholt/archiver/v4/zlib.go b/vendor/github.com/mholt/archiver/v4/zlib.go index ce07890d..485991e6 100644 --- a/vendor/github.com/mholt/archiver/v4/zlib.go +++ b/vendor/github.com/mholt/archiver/v4/zlib.go @@ -1,7 +1,7 @@ package archiver import ( - "bytes" + "context" "io" "strings" @@ -17,22 +17,24 @@ type Zlib struct { CompressionLevel int } -func (Zlib) Name() string { return ".zz" } +func (Zlib) Extension() string { return ".zz" } -func (zz Zlib) Match(filename string, stream io.Reader) (MatchResult, error) { +func (zz Zlib) Match(_ context.Context, filename string, stream io.Reader) (MatchResult, error) { var mr MatchResult // match filename - if strings.Contains(strings.ToLower(filename), zz.Name()) { + if strings.Contains(strings.ToLower(filename), zz.Extension()) { mr.ByName = true } // match file header - buf, err := readAtMost(stream, len(ZlibHeader)) - if err != nil { + buf, err := readAtMost(stream, 2) + // If an error occurred or buf is not 2 bytes we can't check the header + if err != nil || len(buf) < 2 { return mr, err } - mr.ByStream = bytes.Equal(buf, ZlibHeader) + + mr.ByStream = isValidZlibHeader(buf[0], buf[1]) return mr, nil } @@ -49,4 +51,23 @@ func (Zlib) OpenReader(r io.Reader) (io.ReadCloser, error) { return zlib.NewReader(r) } -var ZlibHeader = []byte{0x78} +func isValidZlibHeader(first, second byte) bool { + // Define all 32 valid zlib headers, see https://stackoverflow.com/questions/9050260/what-does-a-zlib-header-look-like/54915442#54915442 + validHeaders := map[uint16]struct{}{ + 0x081D: {}, 0x085B: {}, 0x0899: {}, 0x08D7: {}, + 0x1819: {}, 0x1857: {}, 0x1895: {}, 0x18D3: {}, + 0x2815: {}, 0x2853: {}, 0x2891: {}, 0x28CF: {}, + 0x3811: {}, 0x384F: {}, 0x388D: {}, 0x38CB: {}, + 0x480D: {}, 0x484B: {}, 0x4889: {}, 0x48C7: {}, + 0x5809: {}, 0x5847: {}, 0x5885: {}, 0x58C3: {}, + 0x6805: {}, 0x6843: {}, 0x6881: {}, 0x68DE: {}, + 0x7801: {}, 0x785E: {}, 0x789C: {}, 0x78DA: {}, + } + + // Combine the first and second bytes into a single 16-bit, big-endian value + header := uint16(first)<<8 | uint16(second) + + // Check if the header is in the map of valid headers + _, isValid := validHeaders[header] + return isValid +} diff --git a/vendor/github.com/mholt/archiver/v4/zstd.go b/vendor/github.com/mholt/archiver/v4/zstd.go index fe07b76f..cd0c2814 100644 --- a/vendor/github.com/mholt/archiver/v4/zstd.go +++ b/vendor/github.com/mholt/archiver/v4/zstd.go @@ -2,6 +2,7 @@ package archiver import ( "bytes" + "context" "io" "strings" @@ -18,13 +19,13 @@ type Zstd struct { DecoderOptions []zstd.DOption } -func (Zstd) Name() string { return ".zst" } +func (Zstd) Extension() string { return ".zst" } -func (zs Zstd) Match(filename string, stream io.Reader) (MatchResult, error) { +func (zs Zstd) Match(_ context.Context, filename string, stream io.Reader) (MatchResult, error) { var mr MatchResult // match filename - if strings.Contains(strings.ToLower(filename), zs.Name()) { + if strings.Contains(strings.ToLower(filename), zs.Extension()) { mr.ByName = true } diff --git a/vendor/github.com/nwaples/rardecode/v2/archive.go b/vendor/github.com/nwaples/rardecode/v2/archive.go index 0255eb91..cef112dd 100644 --- a/vendor/github.com/nwaples/rardecode/v2/archive.go +++ b/vendor/github.com/nwaples/rardecode/v2/archive.go @@ -13,11 +13,11 @@ const ( ) var ( - 
errCorruptHeader = errors.New("rardecode: corrupt block header") - errCorruptFileHeader = errors.New("rardecode: corrupt file header") - errBadHeaderCrc = errors.New("rardecode: bad header crc") - errUnknownDecoder = errors.New("rardecode: unknown decoder version") - errDecoderOutOfData = errors.New("rardecode: decoder expected more data than is in packed file") + ErrCorruptBlockHeader = errors.New("rardecode: corrupt block header") + ErrCorruptFileHeader = errors.New("rardecode: corrupt file header") + ErrBadHeaderCRC = errors.New("rardecode: bad header crc") + ErrUnknownDecoder = errors.New("rardecode: unknown decoder version") + ErrDecoderOutOfData = errors.New("rardecode: decoder expected more data than is in packed file") ) type readBuf []byte @@ -111,6 +111,6 @@ func newFileBlockReader(v *volume) (fileBlockReader, error) { case 1: return newArchive50(pass), nil default: - return nil, errUnknownArc + return nil, ErrUnknownVersion } } diff --git a/vendor/github.com/nwaples/rardecode/v2/archive15.go b/vendor/github.com/nwaples/rardecode/v2/archive15.go index 1a3875a5..244d471d 100644 --- a/vendor/github.com/nwaples/rardecode/v2/archive15.go +++ b/vendor/github.com/nwaples/rardecode/v2/archive15.go @@ -17,6 +17,7 @@ const ( // block types blockArc = 0x73 blockFile = 0x74 + blockComment = 0x75 blockService = 0x7a blockEnd = 0x7b @@ -25,6 +26,7 @@ const ( // archive block flags arcVolume = 0x0001 + arcComment = 0x0002 arcSolid = 0x0008 arcNewNaming = 0x0010 arcEncrypted = 0x0080 @@ -50,7 +52,7 @@ const ( ) var ( - errUnsupportedDecoder = errors.New("rardecode: unsupported decoder version") + ErrUnsupportedDecoder = errors.New("rardecode: unsupported decoder version") ) type blockHeader15 struct { @@ -255,7 +257,7 @@ func (a *archive15) parseFileHeader(h *blockHeader15) (*fileBlockHeader, error) b := h.data if len(b) < 21 { - return nil, errCorruptFileHeader + return nil, ErrCorruptFileHeader } f.PackedSize = h.dataSize @@ -273,7 +275,7 @@ func (a *archive15) parseFileHeader(h *blockHeader15) (*fileBlockHeader, error) f.Attributes = int64(b.uint32()) if h.flags&fileLargeData > 0 { if len(b) < 8 { - return nil, errCorruptFileHeader + return nil, ErrCorruptFileHeader } _ = b.uint32() // already read large PackedSize in readBlockHeader f.UnPackedSize |= int64(b.uint32()) << 32 @@ -283,7 +285,7 @@ func (a *archive15) parseFileHeader(h *blockHeader15) (*fileBlockHeader, error) f.UnPackedSize = -1 } if len(b) < namesize { - return nil, errCorruptFileHeader + return nil, ErrCorruptFileHeader } name := b.bytes(namesize) if h.flags&fileUnicode == 0 { @@ -309,7 +311,7 @@ func (a *archive15) parseFileHeader(h *blockHeader15) (*fileBlockHeader, error) var salt []byte if h.flags&fileSalt > 0 { if len(b) < saltSize { - return nil, errCorruptFileHeader + return nil, ErrCorruptFileHeader } salt = b.bytes(saltSize) } @@ -328,13 +330,13 @@ func (a *archive15) parseFileHeader(h *blockHeader15) (*fileBlockHeader, error) if method != 0 { switch unpackver { case 15: - return nil, errUnsupportedDecoder + return nil, ErrUnsupportedDecoder case 20, 26: f.decVer = decode20Ver case 29: f.decVer = decode29Ver default: - return nil, errUnknownDecoder + return nil, ErrUnknownDecoder } } return f, nil @@ -366,8 +368,14 @@ func (a *archive15) readBlockHeader(r sliceReader) (*blockHeader15, error) { h.htype = b.byte() h.flags = b.uint16() size := int(b.uint16()) - if size < 7 { - return nil, errCorruptHeader + if h.htype == blockArc && h.flags&arcComment > 0 { + // comment block embedded into archive block + if size < 13 { 
+ return nil, ErrCorruptBlockHeader + } + size = 13 + } else if size < 7 { + return nil, ErrCorruptBlockHeader } h.data, err = r.readSlice(size) if err != nil { @@ -377,20 +385,27 @@ func (a *archive15) readBlockHeader(r sliceReader) (*blockHeader15, error) { return nil, err } hash := crc32.NewIEEE() - _, _ = hash.Write(h.data[2:]) // Write should always succeed + if h.htype == blockComment { + if size < 13 { + return nil, ErrCorruptBlockHeader + } + _, _ = hash.Write(h.data[2:13]) + } else { + _, _ = hash.Write(h.data[2:]) + } if crc != uint16(hash.Sum32()) { - return nil, errBadHeaderCrc + return nil, ErrBadHeaderCRC } h.data = h.data[7:] if h.flags&blockHasData > 0 { if len(h.data) < 4 { - return nil, errCorruptHeader + return nil, ErrCorruptBlockHeader } h.dataSize = int64(h.data.uint32()) } if (h.htype == blockService || h.htype == blockFile) && h.flags&fileLargeData > 0 { if len(h.data) < 25 { - return nil, errCorruptHeader + return nil, ErrCorruptBlockHeader } b := h.data[21:25] h.dataSize |= int64(b.uint32()) << 32 diff --git a/vendor/github.com/nwaples/rardecode/v2/archive50.go b/vendor/github.com/nwaples/rardecode/v2/archive50.go index bdf7080d..5e58f920 100644 --- a/vendor/github.com/nwaples/rardecode/v2/archive50.go +++ b/vendor/github.com/nwaples/rardecode/v2/archive50.go @@ -52,9 +52,9 @@ const ( ) var ( - errBadPassword = errors.New("rardecode: incorrect password") - errCorruptEncrypt = errors.New("rardecode: corrupt encryption data") - errUnknownEncMethod = errors.New("rardecode: unknown encryption method") + ErrBadPassword = errors.New("rardecode: incorrect password") + ErrCorruptEncryptData = errors.New("rardecode: corrupt encryption data") + ErrUnknownEncryptMethod = errors.New("rardecode: unknown encryption method") ) type extra struct { @@ -159,7 +159,7 @@ func (a *archive50) getKeys(kdfCount int, salt, check []byte) ([][]byte, error) var keys [][]byte if kdfCount > maxKdfCount { - return nil, errCorruptEncrypt + return nil, ErrCorruptEncryptData } kdfCount = 1 << uint(kdfCount) @@ -183,7 +183,7 @@ func (a *archive50) getKeys(kdfCount int, salt, check []byte) ([][]byte, error) // check password if check != nil && !bytes.Equal(check, keys[2]) { - return nil, errBadPassword + return nil, ErrBadPassword } return keys, nil } @@ -191,11 +191,11 @@ func (a *archive50) getKeys(kdfCount int, salt, check []byte) ([][]byte, error) // parseFileEncryptionRecord processes the optional file encryption record from a file header. 
func (a *archive50) parseFileEncryptionRecord(b readBuf, f *fileBlockHeader) error { if ver := b.uvarint(); ver != 0 { - return errUnknownEncMethod + return ErrUnknownEncryptMethod } flags := b.uvarint() if len(b) < 33 { - return errCorruptEncrypt + return ErrCorruptEncryptData } kdfCount := int(b.byte()) salt := b.bytes(16) @@ -204,7 +204,7 @@ func (a *archive50) parseFileEncryptionRecord(b readBuf, f *fileBlockHeader) err var check []byte if flags&file5EncCheckPresent > 0 { if len(b) < 12 { - return errCorruptEncrypt + return ErrCorruptEncryptData } check = b.bytes(12) } @@ -235,13 +235,13 @@ func (a *archive50) parseFileHeader(h *blockHeader50) (*fileBlockHeader, error) f.Attributes = int64(h.data.uvarint()) if flags&file5HasUnixMtime > 0 { if len(h.data) < 4 { - return nil, errCorruptFileHeader + return nil, ErrCorruptFileHeader } f.ModificationTime = time.Unix(int64(h.data.uint32()), 0) } if flags&file5HasCRC32 > 0 { if len(h.data) < 4 { - return nil, errCorruptFileHeader + return nil, ErrCorruptFileHeader } f.sum = append([]byte(nil), h.data.bytes(4)...) if f.first { @@ -257,7 +257,7 @@ func (a *archive50) parseFileHeader(h *blockHeader50) (*fileBlockHeader, error) if f.first && method != 0 { unpackver := flags & 0x003f if unpackver != 0 { - return nil, errUnknownDecoder + return nil, ErrUnknownDecoder } f.decVer = decode50Ver } @@ -271,7 +271,7 @@ func (a *archive50) parseFileHeader(h *blockHeader50) (*fileBlockHeader, error) } nlen := int(h.data.uvarint()) if len(h.data) < nlen { - return nil, errCorruptFileHeader + return nil, ErrCorruptFileHeader } f.Name = string(h.data.bytes(nlen)) @@ -303,11 +303,11 @@ func (a *archive50) parseFileHeader(h *blockHeader50) (*fileBlockHeader, error) // parseEncryptionBlock calculates the key for block encryption. 
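Because the rardecode/v2 sentinel errors are now exported (ErrBadPassword, ErrBadHeaderCRC, and so on), callers can branch on them with errors.Is. A hedged sketch, assuming an encrypted test.rar and that these errors surface from Next():

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"log"
	"os"

	"github.com/nwaples/rardecode/v2"
)

func main() {
	f, err := os.Open("test.rar") // assumed encrypted archive
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	rr, err := rardecode.NewReader(f, rardecode.Password("guess"))
	if err != nil {
		log.Fatal(err)
	}
	for {
		hdr, err := rr.Next()
		if errors.Is(err, io.EOF) {
			break // end of archive
		}
		if errors.Is(err, rardecode.ErrBadPassword) {
			log.Fatal("wrong password") // matchable now that the error is exported
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(hdr.Name)
	}
}
```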
func (a *archive50) parseEncryptionBlock(b readBuf) error { if ver := b.uvarint(); ver != 0 { - return errUnknownEncMethod + return ErrUnknownEncryptMethod } flags := b.uvarint() if len(b) < 17 { - return errCorruptEncrypt + return ErrCorruptEncryptData } kdfCount := int(b.byte()) salt := b.bytes(16) @@ -315,7 +315,7 @@ func (a *archive50) parseEncryptionBlock(b readBuf) error { var check []byte if flags&enc5CheckPresent > 0 { if len(b) < 12 { - return errCorruptEncrypt + return ErrCorruptEncryptData } check = b.bytes(12) } @@ -357,7 +357,7 @@ func (a *archive50) readBlockHeader(r sliceReader) (*blockHeader50, error) { // check header crc _, _ = hash.Write(b[4:]) if crc != hash.Sum32() { - return nil, errBadHeaderCrc + return nil, ErrBadHeaderCRC } b = b[len(b)-size:] @@ -373,7 +373,7 @@ func (a *archive50) readBlockHeader(r sliceReader) (*blockHeader50, error) { h.dataSize = int64(b.uvarint()) } if len(b) < extraSize { - return nil, errCorruptHeader + return nil, ErrCorruptBlockHeader } h.data = b.bytes(len(b) - extraSize) @@ -381,7 +381,7 @@ func (a *archive50) readBlockHeader(r sliceReader) (*blockHeader50, error) { for len(b) > 0 { size = int(b.uvarint()) if len(b) < size { - return nil, errCorruptHeader + return nil, ErrCorruptBlockHeader } data := readBuf(b.bytes(size)) ftype := data.uvarint() diff --git a/vendor/github.com/nwaples/rardecode/v2/bit_reader.go b/vendor/github.com/nwaples/rardecode/v2/bit_reader.go index 9aaa94ca..768a0803 100644 --- a/vendor/github.com/nwaples/rardecode/v2/bit_reader.go +++ b/vendor/github.com/nwaples/rardecode/v2/bit_reader.go @@ -32,7 +32,7 @@ func (r *rar5BitReader) ReadByte() (byte, error) { r.b, err = r.r.bytes() if err != nil { if err == io.EOF { - err = errDecoderOutOfData + err = ErrDecoderOutOfData } return 0, err } @@ -67,7 +67,7 @@ func (r *rar5BitReader) readBits(n uint8) (int, error) { if err != nil { if err == io.EOF { // io.EOF before we reached bit limit - err = errDecoderOutOfData + err = ErrDecoderOutOfData } return 0, err } diff --git a/vendor/github.com/nwaples/rardecode/v2/decode20.go b/vendor/github.com/nwaples/rardecode/v2/decode20.go index ad7d0f43..a61f6c79 100644 --- a/vendor/github.com/nwaples/rardecode/v2/decode20.go +++ b/vendor/github.com/nwaples/rardecode/v2/decode20.go @@ -65,7 +65,7 @@ func readCodeLengthTable20(br *rarBitReader, table []byte) error { } if l == 16 { if i == 0 { - return errInvalidLengthTable + return ErrInvalidLengthTable } var n int n, err = br.readBits(2) @@ -104,7 +104,6 @@ func readCodeLengthTable20(br *rarBitReader, table []byte) error { } func (d *decoder20) readBlockHeader() error { - d.br.alignByte() n, err := d.br.readBits(1) if err != nil { return err @@ -156,7 +155,7 @@ func (d *decoder20) fill(dr *decodeReader) error { d.hdrRead = false continue case io.EOF: - err = errDecoderOutOfData + err = ErrDecoderOutOfData } return err } diff --git a/vendor/github.com/nwaples/rardecode/v2/decode20_lz.go b/vendor/github.com/nwaples/rardecode/v2/decode20_lz.go index bcf6a76e..0134b3c1 100644 --- a/vendor/github.com/nwaples/rardecode/v2/decode20_lz.go +++ b/vendor/github.com/nwaples/rardecode/v2/decode20_lz.go @@ -32,7 +32,7 @@ func (d *lz20Decoder) init(br *rarBitReader, table []byte) error { return nil } -func (d *lz20Decoder) decodeOffset(dr *decodeReader, i int) error { +func (d *lz20Decoder) decodeOffset(i int) error { d.length = lengthBase[i] + 3 bits := lengthExtraBits[i] if bits > 0 { @@ -69,7 +69,7 @@ func (d *lz20Decoder) decodeOffset(dr *decodeReader, i int) error { return nil } -func (d 
*lz20Decoder) decodeLength(dr *decodeReader, i int) error { +func (d *lz20Decoder) decodeLength(i int) error { offset := d.offset[i] copy(d.offset[1:], d.offset[:]) d.offset[0] = offset @@ -100,7 +100,7 @@ func (d *lz20Decoder) decodeLength(dr *decodeReader, i int) error { return nil } -func (d *lz20Decoder) decodeShortOffset(dr *decodeReader, i int) error { +func (d *lz20Decoder) decodeShortOffset(i int) error { copy(d.offset[1:], d.offset[:]) offset := shortOffsetBase[i] + 1 bits := shortOffsetExtraBits[i] @@ -130,15 +130,15 @@ func (d *lz20Decoder) fill(dr *decodeReader, size int64) (int64, error) { n++ continue case sym > 269: - err = d.decodeOffset(dr, sym-270) + err = d.decodeOffset(sym - 270) case sym == 269: return n, errEndOfBlock case sym == 256: // use previous offset and length copy(d.offset[1:], d.offset[:]) case sym < 261: - err = d.decodeLength(dr, sym-257) + err = d.decodeLength(sym - 257) default: - err = d.decodeShortOffset(dr, sym-261) + err = d.decodeShortOffset(sym - 261) } if err != nil { return n, err diff --git a/vendor/github.com/nwaples/rardecode/v2/decode29.go b/vendor/github.com/nwaples/rardecode/v2/decode29.go index a332db70..86ce7799 100644 --- a/vendor/github.com/nwaples/rardecode/v2/decode29.go +++ b/vendor/github.com/nwaples/rardecode/v2/decode29.go @@ -70,7 +70,7 @@ func readVMCode(br *rarBitReader) ([]byte, error) { return nil, err } if n > maxCodeSize || n == 0 { - return nil, errInvalidFilter + return nil, ErrInvalidFilter } buf := make([]byte, n) err = br.readFull(buf) @@ -83,7 +83,7 @@ func readVMCode(br *rarBitReader) ([]byte, error) { } // simple xor checksum on data if x != buf[0] { - return nil, errInvalidFilter + return nil, ErrInvalidFilter } return buf, nil } @@ -105,10 +105,10 @@ func (d *decoder29) parseVMFilter(buf []byte) (*filterBlock, error) { } else { n-- if n > maxUniqueFilters { - return nil, errInvalidFilter + return nil, ErrInvalidFilter } if int(n) > len(d.filters) { - return nil, errInvalidFilter + return nil, ErrInvalidFilter } } d.fnum = int(n) @@ -178,7 +178,7 @@ func (d *decoder29) parseVMFilter(buf []byte) (*filterBlock, error) { return nil, err } if n > vmGlobalSize-vmFixedGlobalSize { - return nil, errInvalidFilter + return nil, ErrInvalidFilter } g = make([]byte, n) err = br.readFull(g) @@ -204,7 +204,7 @@ func (d *decoder29) readBlockHeader() error { if n > 0 { d.isPPM = true if d.ppm == nil { - d.ppm = new(ppm29Decoder) + d.ppm = newPPM29Decoder() } err = d.ppm.init(d.br) } else { @@ -216,7 +216,7 @@ func (d *decoder29) readBlockHeader() error { } } if err == io.EOF { - err = errDecoderOutOfData + err = ErrDecoderOutOfData } d.hdrRead = true return err @@ -263,7 +263,7 @@ func (d *decoder29) fill(dr *decodeReader) error { d.hdrRead = false err = io.EOF case io.EOF: - err = errDecoderOutOfData + err = ErrDecoderOutOfData } return err } diff --git a/vendor/github.com/nwaples/rardecode/v2/decode29_lz.go b/vendor/github.com/nwaples/rardecode/v2/decode29_lz.go index 0b3a619d..198692ed 100644 --- a/vendor/github.com/nwaples/rardecode/v2/decode29_lz.go +++ b/vendor/github.com/nwaples/rardecode/v2/decode29_lz.go @@ -129,7 +129,7 @@ func (d *lz29Decoder) readEndOfBlock() error { return errEndOfFile } -func (d *lz29Decoder) decodeLength(dr *decodeReader, i int) error { +func (d *lz29Decoder) decodeLength(i int) error { offset := d.offset[i] copy(d.offset[1:i+1], d.offset[:i]) d.offset[0] = offset @@ -151,7 +151,7 @@ func (d *lz29Decoder) decodeLength(dr *decodeReader, i int) error { return nil } -func (d *lz29Decoder) 
decodeShortOffset(dr *decodeReader, i int) error { +func (d *lz29Decoder) decodeShortOffset(i int) error { copy(d.offset[1:], d.offset[:]) offset := shortOffsetBase[i] + 1 bits := shortOffsetExtraBits[i] @@ -167,7 +167,7 @@ func (d *lz29Decoder) decodeShortOffset(dr *decodeReader, i int) error { return nil } -func (d *lz29Decoder) decodeOffset(dr *decodeReader, i int) error { +func (d *lz29Decoder) decodeOffset(i int) error { d.length = lengthBase[i] + 3 bits := lengthExtraBits[i] if bits > 0 { @@ -247,11 +247,11 @@ func (d *lz29Decoder) fill(dr *decodeReader) ([]byte, error) { dr.copyBytes(d.length, d.offset[0]) continue case sym >= 271: - err = d.decodeOffset(dr, sym-271) + err = d.decodeOffset(sym - 271) case sym >= 263: - err = d.decodeShortOffset(dr, sym-263) + err = d.decodeShortOffset(sym - 263) case sym >= 259: - err = d.decodeLength(dr, sym-259) + err = d.decodeLength(sym - 259) case sym == 256: return nil, d.readEndOfBlock() default: // sym == 257 diff --git a/vendor/github.com/nwaples/rardecode/v2/decode29_ppm.go b/vendor/github.com/nwaples/rardecode/v2/decode29_ppm.go index c3b515d8..365b8529 100644 --- a/vendor/github.com/nwaples/rardecode/v2/decode29_ppm.go +++ b/vendor/github.com/nwaples/rardecode/v2/decode29_ppm.go @@ -136,3 +136,12 @@ func (d *ppm29Decoder) fill(dr *decodeReader) ([]byte, error) { } return nil, nil } + +func newPPM29Decoder() *ppm29Decoder { + ppm := new(ppm29Decoder) + ppm.reset() + ppm.m.maxOrder = 2 + ppm.m.a.init(1) + + return ppm +} diff --git a/vendor/github.com/nwaples/rardecode/v2/decode50.go b/vendor/github.com/nwaples/rardecode/v2/decode50.go index f9c036d4..cc1f17f9 100644 --- a/vendor/github.com/nwaples/rardecode/v2/decode50.go +++ b/vendor/github.com/nwaples/rardecode/v2/decode50.go @@ -14,8 +14,8 @@ const ( ) var ( - errUnknownFilter = errors.New("rardecode: unknown V5 filter") - errCorruptDecodeHeader = errors.New("rardecode: corrupt decode header") + ErrUnknownFilter = errors.New("rardecode: unknown V5 filter") + ErrCorruptDecodeHeader = errors.New("rardecode: corrupt decode header") ) // decoder50 implements the decoder interface for RAR 5 compression. 
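The dominant change across these rardecode hunks is the renaming of the package's sentinel errors from unexported (errFoo) to exported (ErrFoo) identifiers. The sketch below, which is not part of the vendored diff, shows what the rename enables on the caller side; it assumes rardecode/v2's OpenReader entry point and Password option, and the archive name is hypothetical.

```go
// A minimal sketch, not part of the vendored code: with the sentinel errors
// exported, callers can branch on them with errors.Is instead of comparing
// error strings. OpenReader and the Password option are assumed from
// rardecode/v2; "archive.rar" is a hypothetical path.
package main

import (
	"errors"
	"fmt"

	"github.com/nwaples/rardecode/v2"
)

func main() {
	r, err := rardecode.OpenReader("archive.rar", rardecode.Password("secret"))
	if err != nil {
		switch {
		case errors.Is(err, rardecode.ErrBadPassword):
			fmt.Println("wrong password")
		case errors.Is(err, rardecode.ErrNoSig):
			fmt.Println("no RAR signature found")
		default:
			fmt.Println("open failed:", err)
		}
		return
	}
	defer r.Close()
	fmt.Println("archive opened")
}
```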
@@ -62,7 +62,7 @@ func (d *decoder50) readBlockHeader() error { bytecount := (flags>>3)&3 + 1 if bytecount == 4 { - return errCorruptDecodeHeader + return ErrCorruptDecodeHeader } hsum, err := d.br.ReadByte() @@ -83,7 +83,7 @@ func (d *decoder50) readBlockHeader() error { blockBytes |= int(n) << (i * 8) } if sum != hsum { // bad header checksum - return errCorruptDecodeHeader + return ErrCorruptDecodeHeader } blockBits += (blockBytes - 1) * 8 @@ -176,7 +176,7 @@ func (d *decoder50) readFilter(dr *decodeReader) error { case 3: fb.filter = filterArm default: - return errUnknownFilter + return ErrUnknownFilter } return dr.queueFilter(fb) } @@ -293,7 +293,7 @@ func (d *decoder50) fill(dr *decodeReader) error { } if err != nil { if err == io.EOF { - return errDecoderOutOfData + return ErrDecoderOutOfData } return err } diff --git a/vendor/github.com/nwaples/rardecode/v2/decode_reader.go b/vendor/github.com/nwaples/rardecode/v2/decode_reader.go index fbed77f1..d9c2db06 100644 --- a/vendor/github.com/nwaples/rardecode/v2/decode_reader.go +++ b/vendor/github.com/nwaples/rardecode/v2/decode_reader.go @@ -8,9 +8,9 @@ const ( ) var ( - errTooManyFilters = errors.New("rardecode: too many filters") - errInvalidFilter = errors.New("rardecode: invalid filter") - errMultipleDecoders = errors.New("rardecode: multiple decoders in a single archive not supported") + ErrTooManyFilters = errors.New("rardecode: too many filters") + ErrInvalidFilter = errors.New("rardecode: invalid filter") + ErrMultipleDecoders = errors.New("rardecode: multiple decoders in a single archive not supported") ) // filter functions take a byte slice, the current output offset and @@ -94,10 +94,10 @@ func (d *decodeReader) init(r byteReader, ver int, winsize uint, reset bool, unP case decode20Ver: d.dec = new(decoder20) default: - return errUnknownDecoder + return ErrUnknownDecoder } } else if d.dec.version() != ver { - return errMultipleDecoders + return ErrMultipleDecoders } d.dec.init(r, reset, unPackedSize) return nil @@ -150,7 +150,7 @@ func (d *decodeReader) copyBytes(length, offset int) { // queueFilter adds a filterBlock to the end decodeReader's filters. func (d *decodeReader) queueFilter(f *filterBlock) error { if len(d.fl) >= maxQueuedFilters { - return errTooManyFilters + return ErrTooManyFilters } // make offset relative to read index (from write index) f.offset += d.w - d.r @@ -158,7 +158,7 @@ func (d *decodeReader) queueFilter(f *filterBlock) error { for _, fb := range d.fl { if f.offset < fb.offset { // filter block must not start before previous filter - return errInvalidFilter + return ErrInvalidFilter } f.offset -= fb.offset } @@ -243,7 +243,7 @@ func (d *decodeReader) processFilters() ([]byte, error) { return b, nil } if f.length != len(b) { - return nil, errInvalidFilter + return nil, ErrInvalidFilter } } } @@ -269,7 +269,7 @@ func (d *decodeReader) bytes() ([]byte, error) { // check filters f := d.fl[0] if f.offset < 0 { - return nil, errInvalidFilter + return nil, ErrInvalidFilter } if f.offset > 0 { // filter not at current read index, output bytes before it diff --git a/vendor/github.com/nwaples/rardecode/v2/filters.go b/vendor/github.com/nwaples/rardecode/v2/filters.go index 948422fe..6e625f39 100644 --- a/vendor/github.com/nwaples/rardecode/v2/filters.go +++ b/vendor/github.com/nwaples/rardecode/v2/filters.go @@ -296,7 +296,7 @@ type vmFilter struct { // execute implements v3filter type for VM based RAR 3 filters. 
func (f *vmFilter) execute(r map[int]uint32, global, buf []byte, offset int64) ([]byte, error) { if len(buf) > vmGlobalAddr { - return buf, errInvalidFilter + return buf, ErrInvalidFilter } v := newVM(buf) diff --git a/vendor/github.com/nwaples/rardecode/v2/huffman.go b/vendor/github.com/nwaples/rardecode/v2/huffman.go index 03a90d56..e8672751 100644 --- a/vendor/github.com/nwaples/rardecode/v2/huffman.go +++ b/vendor/github.com/nwaples/rardecode/v2/huffman.go @@ -12,8 +12,8 @@ const ( ) var ( - errHuffDecodeFailed = errors.New("rardecode: huffman decode failed") - errInvalidLengthTable = errors.New("rardecode: invalid huffman code length table") + ErrHuffDecodeFailed = errors.New("rardecode: huffman decode failed") + ErrInvalidLengthTable = errors.New("rardecode: invalid huffman code length table") ) type huffmanDecoder struct { @@ -131,7 +131,7 @@ func (h *huffmanDecoder) readSym(r bitReader) (int, error) { pos := int(h.pos[bits]) + int(dist) if pos >= len(h.symbol) { - return 0, errHuffDecodeFailed + return 0, ErrHuffDecodeFailed } return int(h.symbol[pos]), nil @@ -194,7 +194,7 @@ func readCodeLengthTable(br bitReader, codeLength []byte, addOld bool) error { } if l < 18 { if i == 0 { - return errInvalidLengthTable + return ErrInvalidLengthTable } value = codeLength[i-1] } diff --git a/vendor/github.com/nwaples/rardecode/v2/ppm_model.go b/vendor/github.com/nwaples/rardecode/v2/ppm_model.go index fd55a741..0b3b6c75 100644 --- a/vendor/github.com/nwaples/rardecode/v2/ppm_model.go +++ b/vendor/github.com/nwaples/rardecode/v2/ppm_model.go @@ -31,7 +31,7 @@ const ( ) var ( - errCorruptPPM = errors.New("rardecode: corrupt ppm data") + ErrCorruptPPM = errors.New("rardecode: corrupt ppm data") expEscape = []byte{25, 14, 9, 7, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2} initBinEsc = []uint16{0x3CDD, 0x1F3F, 0x59BF, 0x48F3, 0x64A1, 0x5ABC, 0x6632, 0x6051} @@ -616,7 +616,7 @@ func (m *model) init(br io.ByteReader, reset bool, maxOrder, maxMB int) error { m.a.init(maxMB) if maxOrder == 1 { - return errCorruptPPM + return ErrCorruptPPM } m.maxOrder = maxOrder m.prevSym = 0 @@ -718,7 +718,7 @@ func (m *model) decodeSymbol1(c context) (*state, error) { // protect against divide by zero // TODO: look at why this happens, may be problem elsewhere if scale == 0 { - return nil, errCorruptPPM + return nil, ErrCorruptPPM } count := m.rc.currentCount(scale) m.prevSuccess = 0 @@ -799,7 +799,7 @@ func (m *model) decodeSymbol2(c context, numMasked int) (*state, error) { count := m.rc.currentCount(scale) if count >= scale { - return nil, errCorruptPPM + return nil, ErrCorruptPPM } if count >= hi { err := m.rc.decode(hi, scale) @@ -1044,7 +1044,7 @@ func (m *model) ReadByte() (byte, error) { m.orderFall++ minC = m.a.contextSuffix(minC) if minC <= 0 { - return 0, errCorruptPPM + return 0, ErrCorruptPPM } } s, err = m.decodeSymbol2(minC, n) diff --git a/vendor/github.com/nwaples/rardecode/v2/reader.go b/vendor/github.com/nwaples/rardecode/v2/reader.go index b739dc29..b0a98c09 100644 --- a/vendor/github.com/nwaples/rardecode/v2/reader.go +++ b/vendor/github.com/nwaples/rardecode/v2/reader.go @@ -28,12 +28,12 @@ const ( ) var ( - errShortFile = errors.New("rardecode: decoded file too short") - errInvalidFileBlock = errors.New("rardecode: invalid file block") - errUnexpectedArcEnd = errors.New("rardecode: unexpected end of archive") - errBadFileChecksum = errors.New("rardecode: bad file checksum") - errSolidOpen = errors.New("rardecode: solid files don't support Open") - errUnknownArc = errors.New("rardecode: unknown archive 
version") + ErrShortFile = errors.New("rardecode: decoded file too short") + ErrInvalidFileBlock = errors.New("rardecode: invalid file block") + ErrUnexpectedArcEnd = errors.New("rardecode: unexpected end of archive") + ErrBadFileChecksum = errors.New("rardecode: bad file checksum") + ErrSolidOpen = errors.New("rardecode: solid files don't support Open") + ErrUnknownVersion = errors.New("rardecode: unknown archive version") ) // FileHeader represents a single file in a RAR archive. @@ -140,12 +140,12 @@ func (f *packedFileReader) nextBlock() error { if err != nil { if err == io.EOF { // archive ended, but file hasn't - return errUnexpectedArcEnd + return ErrUnexpectedArcEnd } return err } if h.first || h.Name != f.h.Name { - return errInvalidFileBlock + return ErrInvalidFileBlock } f.n = h.PackedSize f.h = h @@ -167,7 +167,7 @@ func (f *packedFileReader) next() (*fileBlockHeader, error) { return nil, err } if !f.h.first { - return nil, errInvalidFileBlock + return nil, ErrInvalidFileBlock } f.n = f.h.PackedSize return f.h, nil @@ -297,7 +297,7 @@ func (cr *checksumReader) eofError() error { } } if !bytes.Equal(sum, h.sum) { - return errBadFileChecksum + return ErrBadFileChecksum } return io.EOF } @@ -421,7 +421,7 @@ func (r *Reader) nextFile() error { } if h.UnPackedSize >= 0 && !h.UnKnownSize { // Limit reading to UnPackedSize as there may be padding - r.r = &limitedReader{r.r, h.UnPackedSize, errShortFile} + r.r = &limitedReader{r.r, h.UnPackedSize, ErrShortFile} } if h.hash != nil { r.r = &checksumReader{r.r, h.hash(), r.pr} @@ -471,7 +471,7 @@ type File struct { // contents instead. func (f *File) Open() (io.ReadCloser, error) { if f.Solid { - return nil, errSolidOpen + return nil, ErrSolidOpen } r := new(ReadCloser) r.pr = f.pr.clone() diff --git a/vendor/github.com/nwaples/rardecode/v2/vm.go b/vendor/github.com/nwaples/rardecode/v2/vm.go index 688150f3..6f8b00b6 100644 --- a/vendor/github.com/nwaples/rardecode/v2/vm.go +++ b/vendor/github.com/nwaples/rardecode/v2/vm.go @@ -19,7 +19,7 @@ const ( ) var ( - errInvalidVMInstruction = errors.New("rardecode: invalid vm instruction") + ErrInvalidVMInstruction = errors.New("rardecode: invalid vm instruction") ) type vm struct { @@ -655,7 +655,7 @@ func readCommands(br *rarBitReader) ([]command, error) { } if code >= len(ops) { - return cmds, errInvalidVMInstruction + return cmds, ErrInvalidVMInstruction } ins := ops[code] diff --git a/vendor/github.com/nwaples/rardecode/v2/volume.go b/vendor/github.com/nwaples/rardecode/v2/volume.go index 4754ac20..af600674 100644 --- a/vendor/github.com/nwaples/rardecode/v2/volume.go +++ b/vendor/github.com/nwaples/rardecode/v2/volume.go @@ -21,10 +21,10 @@ const ( ) var ( - errNoSig = errors.New("rardecode: RAR signature not found") - errVerMismatch = errors.New("rardecode: volume version mistmatch") - errArchiveNameEmpty = errors.New("rardecode: archive name empty") - errFileNameRequired = errors.New("rardecode: filename required for multi volume archive") + ErrNoSig = errors.New("rardecode: RAR signature not found") + ErrVerMismatch = errors.New("rardecode: volume version mistmatch") + ErrArchiveNameEmpty = errors.New("rardecode: archive name empty") + ErrFileNameRequired = errors.New("rardecode: filename required for multi volume archive") ) type option struct { @@ -88,7 +88,7 @@ func (v *volume) openFile(file string) error { var f io.Reader if len(file) == 0 { - return errArchiveNameEmpty + return ErrArchiveNameEmpty } if fs := v.opt.fs; fs != nil { f, err = fs.Open(v.dir + file) @@ -209,7 +209,7 @@ 
func (v *volume) findSig() error { continue } else if err != nil { if err == io.EOF { - err = errNoSig + err = ErrNoSig } return err } @@ -217,7 +217,7 @@ func (v *volume) findSig() error { b, err = v.br.Peek(len(sigPrefix[1:]) + 2) if err != nil { if err == io.EOF { - err = errNoSig + err = ErrNoSig } return err } @@ -235,11 +235,11 @@ if v.num == 0 { v.ver = ver } else if v.ver != ver { - return errVerMismatch + return ErrVerMismatch } return err } - return errNoSig + return ErrNoSig } func nextNewVolName(file string) string { @@ -321,7 +321,7 @@ func hasDigits(s string) bool { return false } -// nextVolName updates name to the next filename in the archive. +// openNextFile opens the next volume file in the archive. func (v *volume) openNextFile() error { file := v.file if v.num == 0 { @@ -366,7 +366,7 @@ func (v *volume) openNextFile() error { func (v *volume) next() error { if len(v.file) == 0 { - return errFileNameRequired + return ErrFileNameRequired } err := v.Close() if err != nil { diff --git a/vendor/github.com/pierrec/lz4/v4/README.md b/vendor/github.com/pierrec/lz4/v4/README.md index 4629c9d0..dee77545 100644 --- a/vendor/github.com/pierrec/lz4/v4/README.md +++ b/vendor/github.com/pierrec/lz4/v4/README.md @@ -21,7 +21,7 @@ go get github.com/pierrec/lz4/v4 There is a command line interface tool to compress and decompress LZ4 files. ``` -go install github.com/pierrec/lz4/v4/cmd/lz4c +go install github.com/pierrec/lz4/v4/cmd/lz4c@latest ``` Usage diff --git a/vendor/github.com/pierrec/lz4/v4/compressing_reader.go b/vendor/github.com/pierrec/lz4/v4/compressing_reader.go new file mode 100644 index 00000000..8df0dc76 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/compressing_reader.go @@ -0,0 +1,222 @@ +package lz4 + +import ( + "errors" + "io" + + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" + "github.com/pierrec/lz4/v4/internal/lz4stream" +) + +type crState int + +const ( + crStateInitial crState = iota + crStateReading + crStateFlushing + crStateDone +) + +type CompressingReader struct { + state crState + src io.ReadCloser // source reader + level lz4block.CompressionLevel // how hard to try + frame *lz4stream.Frame // frame being built + in []byte + out ovWriter + handler func(int) +} + +// NewCompressingReader creates a reader which reads compressed data from +// a raw stream. This makes it the logical opposite of a normal lz4.Reader. +// We require an io.ReadCloser as an underlying source for compatibility +// with Go's http.Request. +func NewCompressingReader(src io.ReadCloser) *CompressingReader { + zrd := &CompressingReader { + frame: lz4stream.NewFrame(), + } + + _ = zrd.Apply(DefaultBlockSizeOption, DefaultChecksumOption, defaultOnBlockDone) + zrd.Reset(src) + + return zrd +} + +// Source exposes the underlying source stream for introspection and control. +func (zrd *CompressingReader) Source() io.ReadCloser { + return zrd.src +} + +// Close simply invokes the underlying stream Close method. This method is +// provided for the benefit of Go http client/server, which relies on Close +// for goroutine termination. +func (zrd *CompressingReader) Close() error { + return zrd.src.Close() +} + +// Apply applies useful options to the lz4 encoder.
+func (zrd *CompressingReader) Apply(options ...Option) (err error) { + if zrd.state != crStateInitial { + return lz4errors.ErrOptionClosedOrError + } + + zrd.Reset(zrd.src) + + for _, o := range options { + if err = o(zrd); err != nil { + return + } + } + return +} + +func (*CompressingReader) private() {} + +func (zrd *CompressingReader) init() error { + zrd.frame.InitW(&zrd.out, 1, false) + size := zrd.frame.Descriptor.Flags.BlockSizeIndex() + zrd.in = size.Get() + return zrd.frame.Descriptor.Write(zrd.frame, &zrd.out) +} + +// Read allows reading of lz4 compressed data +func (zrd *CompressingReader) Read(p []byte) (n int, err error) { + defer func() { + if err != nil { + zrd.state = crStateDone + } + }() + + if !zrd.out.reset(p) { + return len(p), nil + } + + switch zrd.state { + case crStateInitial: + err = zrd.init() + if err != nil { + return + } + zrd.state = crStateReading + case crStateDone: + return 0, errors.New("This reader is done") + case crStateFlushing: + if zrd.out.dataPos > 0 { + n = zrd.out.dataPos + zrd.out.data = nil + zrd.out.dataPos = 0 + return + } else { + zrd.state = crStateDone + return 0, io.EOF + } + } + + for zrd.state == crStateReading { + block := zrd.frame.Blocks.Block + + var rCount int + rCount, err = io.ReadFull(zrd.src, zrd.in) + switch err { + case nil: + err = block.Compress( + zrd.frame, zrd.in[ : rCount], zrd.level, + ).Write(zrd.frame, &zrd.out) + zrd.handler(len(block.Data)) + if err != nil { + return + } + + if zrd.out.dataPos == len(zrd.out.data) { + n = zrd.out.dataPos + zrd.out.dataPos = 0 + zrd.out.data = nil + return + } + case io.EOF, io.ErrUnexpectedEOF: // read may be partial + if rCount > 0 { + err = block.Compress( + zrd.frame, zrd.in[ : rCount], zrd.level, + ).Write(zrd.frame, &zrd.out) + zrd.handler(len(block.Data)) + if err != nil { + return + } + } + + err = zrd.frame.CloseW(&zrd.out, 1) + if err != nil { + return + } + zrd.state = crStateFlushing + + n = zrd.out.dataPos + zrd.out.dataPos = 0 + zrd.out.data = nil + return + default: + return + } + } + + err = lz4errors.ErrInternalUnhandledState + return +} + +// Reset makes the stream usable again; mostly handy to reuse lz4 encoder +// instances. +func (zrd *CompressingReader) Reset(src io.ReadCloser) { + zrd.frame.Reset(1) + zrd.state = crStateInitial + zrd.src = src + zrd.out.clear() +} + +type ovWriter struct { + data []byte + ov []byte + dataPos int + ovPos int +} + +func (wr *ovWriter) Write(p []byte) (n int, err error) { + count := copy(wr.data[wr.dataPos : ], p) + wr.dataPos += count + + if count < len(p) { + wr.ov = append(wr.ov, p[count : ]...) 
+ } + + return len(p), nil +} + +func (wr *ovWriter) reset(out []byte) bool { + ovRem := len(wr.ov) - wr.ovPos + + if ovRem >= len(out) { + wr.ovPos += copy(out, wr.ov[wr.ovPos : ]) + return false + } + + if ovRem > 0 { + copy(out, wr.ov[wr.ovPos : ]) + wr.ov = wr.ov[ : 0] + wr.ovPos = 0 + wr.dataPos = ovRem + } else if wr.ovPos > 0 { + wr.ov = wr.ov[ : 0] + wr.ovPos = 0 + wr.dataPos = 0 + } + + wr.data = out + return true +} + +func (wr *ovWriter) clear() { + wr.data = nil + wr.dataPos = 0 + wr.ov = wr.ov[ : 0] + wr.ovPos = 0 +} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go index 9054998f..fec8adb0 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go @@ -31,11 +31,10 @@ func recoverBlock(e *error) { } } -// blockHash hashes the lower five bytes of x into a value < htSize. +// blockHash hashes the lower 6 bytes into a value < htSize. func blockHash(x uint64) uint32 { const prime6bytes = 227718039650203 - x &= 1<<40 - 1 - return uint32((x * prime6bytes) >> (64 - hashLog)) + return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog)) } func CompressBlockBound(n int) int { @@ -123,9 +122,9 @@ func (c *Compressor) CompressBlock(src, dst []byte) (int, error) { goto lastLiterals } - // Fast scan strategy: the hash table only stores the last five-byte sequences. + // Fast scan strategy: the hash table only stores the last 4-byte sequences. for si < sn { - // Hash the next five bytes (sequence)... + // Hash the next 6 bytes (sequence)... match := binary.LittleEndian.Uint64(src[si:]) h := blockHash(match) h2 := blockHash(match >> 8) diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go index a1bfa99e..138083d9 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go @@ -8,12 +8,9 @@ const ( Block256Kb Block1Mb Block4Mb + Block8Mb = 2 * Block4Mb ) -// In legacy mode all blocks are compressed regardless -// of the compressed size: use the bound size. -var Block8Mb = uint32(CompressBlockBound(8 << 20)) - var ( BlockPool64K = sync.Pool{New: func() interface{} { return make([]byte, Block64Kb) }} BlockPool256K = sync.Pool{New: func() interface{} { return make([]byte, Block256Kb) }} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s index c43e8a8d..d2fe11b8 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s @@ -185,7 +185,7 @@ copyMatchTry8: // A 16-at-a-time loop doesn't provide a further speedup. CMP $8, len CCMP HS, offset, $8, $0 - BLO copyMatchLoop1 + BLO copyMatchTry4 AND $7, len, lenRem SUB $8, len @@ -201,8 +201,19 @@ copyMatchLoop8: MOVD tmp2, -8(dst) B copyMatchDone +copyMatchTry4: + // Copy words if both len and offset are at least four. + CMP $4, len + CCMP HS, offset, $4, $0 + BLO copyMatchLoop1 + + MOVWU.P 4(match), tmp2 + MOVWU.P tmp2, 4(dst) + SUBS $4, len + BEQ copyMatchDone + copyMatchLoop1: - // Byte-at-a-time copy for small offsets. + // Byte-at-a-time copy for small offsets <= 3.
MOVBU.P 1(match), tmp2 MOVB.P tmp2, 1(dst) SUBS $1, len diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go index 2010cd74..9f568fbb 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go @@ -48,11 +48,14 @@ func decodeBlock(dst, src, dict []byte) (ret int) { mLen += 4 if offset := u16(src[si:]); mLen <= offset && offset < di { i := di - offset - end := i + 18 - copy(dst[di:], dst[i:end]) - si += 2 - di += mLen - continue + // The remaining buffer may not hold 18 bytes. + // See https://github.com/pierrec/lz4/issues/51. + if end := i + 18; end <= uint(len(dst)) { + copy(dst[di:], dst[i:end]) + si += 2 + di += mLen + continue + } } } case lLen == 0xF: diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go index 459086f0..e9646546 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go @@ -224,9 +224,7 @@ func (b *FrameDataBlock) Close(f *Frame) { func (b *FrameDataBlock) Compress(f *Frame, src []byte, level lz4block.CompressionLevel) *FrameDataBlock { data := b.data if f.isLegacy() { - // In legacy mode, the buffer is sized according to CompressBlockBound, - // but only 8Mb is buffered for compression. - src = src[:8<<20] + data = data[:cap(data)] } else { data = data[:len(src)] // trigger the incompressible flag in CompressBlock } diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go index 8d3206a8..651d10c1 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go +++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go @@ -1,5 +1,5 @@ // Package xxh32 implements the very fast XXH hashing algorithm (32 bits version). 
-// (https://github.com/Cyan4973/XXH/) +// (ported from the reference implementation https://github.com/Cyan4973/xxHash/) package xxh32 import ( diff --git a/vendor/github.com/pierrec/lz4/v4/options.go b/vendor/github.com/pierrec/lz4/v4/options.go index 46a87380..57a44e76 100644 --- a/vendor/github.com/pierrec/lz4/v4/options.go +++ b/vendor/github.com/pierrec/lz4/v4/options.go @@ -57,6 +57,13 @@ func BlockSizeOption(size BlockSize) Option { } w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size)) return nil + case *CompressingReader: + size := uint32(size) + if !lz4block.IsValid(size) { + return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidBlockSize, size) + } + w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size)) + return nil } return lz4errors.ErrOptionNotApplicable } @@ -72,6 +79,9 @@ func BlockChecksumOption(flag bool) Option { case *Writer: w.frame.Descriptor.Flags.BlockChecksumSet(flag) return nil + case *CompressingReader: + w.frame.Descriptor.Flags.BlockChecksumSet(flag) + return nil } return lz4errors.ErrOptionNotApplicable } @@ -87,6 +97,9 @@ func ChecksumOption(flag bool) Option { case *Writer: w.frame.Descriptor.Flags.ContentChecksumSet(flag) return nil + case *CompressingReader: + w.frame.Descriptor.Flags.ContentChecksumSet(flag) + return nil } return lz4errors.ErrOptionNotApplicable } @@ -104,6 +117,10 @@ func SizeOption(size uint64) Option { w.frame.Descriptor.Flags.SizeSet(size > 0) w.frame.Descriptor.ContentSize = size return nil + case *CompressingReader: + w.frame.Descriptor.Flags.SizeSet(size > 0) + w.frame.Descriptor.ContentSize = size + return nil } return lz4errors.ErrOptionNotApplicable } @@ -162,6 +179,14 @@ func CompressionLevelOption(level CompressionLevel) Option { } w.level = lz4block.CompressionLevel(level) return nil + case *CompressingReader: + switch level { + case Fast, Level1, Level2, Level3, Level4, Level5, Level6, Level7, Level8, Level9: + default: + return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidCompressionLevel, level) + } + w.level = lz4block.CompressionLevel(level) + return nil } return lz4errors.ErrOptionNotApplicable } @@ -186,6 +211,9 @@ func OnBlockDoneOption(handler func(size int)) Option { case *Reader: rw.handler = handler return nil + case *CompressingReader: + rw.handler = handler + return nil } return lz4errors.ErrOptionNotApplicable } diff --git a/vendor/github.com/pierrec/lz4/v4/writer.go b/vendor/github.com/pierrec/lz4/v4/writer.go index 77699f2b..4358adee 100644 --- a/vendor/github.com/pierrec/lz4/v4/writer.go +++ b/vendor/github.com/pierrec/lz4/v4/writer.go @@ -150,6 +150,10 @@ func (w *Writer) Flush() (err error) { case writeState: case errorState: return w.state.err + case newState: + if err = w.init(); w.state.next(err) { + return + } default: return nil } diff --git a/vendor/github.com/sorairolake/lzip-go/.bumpversion.toml b/vendor/github.com/sorairolake/lzip-go/.bumpversion.toml new file mode 100644 index 00000000..76cfb0f0 --- /dev/null +++ b/vendor/github.com/sorairolake/lzip-go/.bumpversion.toml @@ -0,0 +1,15 @@ +# SPDX-FileCopyrightText: 2024 Shun Sakai +# +# SPDX-License-Identifier: Apache-2.0 OR MIT + +[tool.bumpversion] +current_version = "0.3.5" + +[[tool.bumpversion.files]] +filename = "cmd/glzip/cli.go" + +[[tool.bumpversion.files]] +filename = "cmd/glzip/testdata/version.ct" + +[[tool.bumpversion.files]] +filename = "docs/man/man1/glzip.1.adoc" diff --git a/vendor/github.com/sorairolake/lzip-go/.gitignore b/vendor/github.com/sorairolake/lzip-go/.gitignore new file mode 100644 index 
00000000..caead21c --- /dev/null +++ b/vendor/github.com/sorairolake/lzip-go/.gitignore @@ -0,0 +1,20 @@ +# SPDX-FileCopyrightText: 2024 Shun Sakai +# +# SPDX-License-Identifier: Apache-2.0 OR MIT + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +/glzip + +# Test binary, built with `go test -c` +*.test + +# Generated man page +glzip.1 + +# GoReleaser +dist/ diff --git a/vendor/github.com/sorairolake/lzip-go/.goreleaser.yaml b/vendor/github.com/sorairolake/lzip-go/.goreleaser.yaml new file mode 100644 index 00000000..42c78a16 --- /dev/null +++ b/vendor/github.com/sorairolake/lzip-go/.goreleaser.yaml @@ -0,0 +1,53 @@ +# SPDX-FileCopyrightText: 2024 Shun Sakai +# +# SPDX-License-Identifier: Apache-2.0 OR MIT + +version: 1 + +before: + hooks: + - go mod tidy + - asciidoctor -b manpage docs/man/man1/glzip.1.adoc + +builds: + - main: ./cmd/glzip + binary: glzip + flags: + - -trimpath + ldflags: + - -s -w + env: + - CGO_ENABLED=0 + goos: + - darwin + - freebsd + - linux + - openbsd + - windows + goarch: + - amd64 + - arm64 + +archives: + - format: tar.zst + # use zip for windows archives + format_overrides: + - goos: windows + format: zip + files: + - AUTHORS.adoc + - CHANGELOG.adoc + - CONTRIBUTING.adoc + - docs/man/man1/glzip.1 + - LICENSES/* + - README.md + +checksum: + algorithm: sha3-512 + +changelog: + sort: asc + filters: + exclude: + - "^docs:" + - "^test:" diff --git a/vendor/github.com/sorairolake/lzip-go/AUTHORS.adoc b/vendor/github.com/sorairolake/lzip-go/AUTHORS.adoc new file mode 100644 index 00000000..53949a7d --- /dev/null +++ b/vendor/github.com/sorairolake/lzip-go/AUTHORS.adoc @@ -0,0 +1,9 @@ +// SPDX-FileCopyrightText: 2024 Shun Sakai +// +// SPDX-License-Identifier: Apache-2.0 OR MIT + += List of Authors + +== Original author + +* https://github.com/sorairolake[Shun Sakai] diff --git a/vendor/github.com/sorairolake/lzip-go/CHANGELOG.adoc b/vendor/github.com/sorairolake/lzip-go/CHANGELOG.adoc new file mode 100644 index 00000000..4834bbdf --- /dev/null +++ b/vendor/github.com/sorairolake/lzip-go/CHANGELOG.adoc @@ -0,0 +1,71 @@ +// SPDX-FileCopyrightText: 2024 Shun Sakai +// +// SPDX-License-Identifier: Apache-2.0 OR MIT + += Changelog +:toc: preamble +:project-url: https://github.com/sorairolake/lzip-go +:compare-url: {project-url}/compare +:issue-url: {project-url}/issues +:pull-request-url: {project-url}/pull + +All notable changes to this project will be documented in this file. + +The format is based on https://keepachangelog.com/[Keep a Changelog], and this +project adheres to https://semver.org/[Semantic Versioning]. 
+ +== {compare-url}/v0.3.4\...v0.3.5[0.3.5] - 2024-08-04 + +=== Changed + +* Update man pages + +== {compare-url}/v0.3.3\...v0.3.4[0.3.4] - 2024-05-02 + +=== Changed + +* Change to provide pre-built binaries ({pull-request-url}/21[#21]) + +== {compare-url}/v0.3.2\...v0.3.3[0.3.3] - 2024-04-16 + +=== Changed + +* Update document + +== {compare-url}/v0.3.1\...v0.3.2[0.3.2] - 2024-04-10 + +=== Changed + +* Ungroup constants ({pull-request-url}/13[#13]) + +== {compare-url}/v0.3.0\...v0.3.1[0.3.1] - 2024-04-08 + +=== Changed + +* Update document for errors ({pull-request-url}/11[#11]) + +== {compare-url}/v0.2.0\...v0.3.0[0.3.0] - 2024-04-07 + +=== Changed + +* Change errors to include details ({pull-request-url}/8[#8]) + +== {compare-url}/v0.1.0\...v0.2.0[0.2.0] - 2024-04-05 + +=== Added + +* Add a simple command-line utility for reading and writing of lzip format + compressed files ({pull-request-url}/4[#4]) + +=== Changed + +* Export constants regarding the dictionary size and the member size + ({pull-request-url}/3[#3]) +* Change the type of `WriterOptions.DictSize` from `int` to `uint32` + ({pull-request-url}/5[#5]) + +== {project-url}/releases/tag/v0.1.0[0.1.0] - 2024-04-04 + +=== Added + +* Initial release diff --git a/vendor/github.com/sorairolake/lzip-go/CODE_OF_CONDUCT.md b/vendor/github.com/sorairolake/lzip-go/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..4063f630 --- /dev/null +++ b/vendor/github.com/sorairolake/lzip-go/CODE_OF_CONDUCT.md @@ -0,0 +1,138 @@ + + +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual +identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or advances of + any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email address, + without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. 
+ +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the +community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/translations][translations]. 
+ +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations diff --git a/vendor/github.com/sorairolake/lzip-go/CONTRIBUTING.adoc b/vendor/github.com/sorairolake/lzip-go/CONTRIBUTING.adoc new file mode 100644 index 00000000..1fb64745 --- /dev/null +++ b/vendor/github.com/sorairolake/lzip-go/CONTRIBUTING.adoc @@ -0,0 +1,61 @@ +// SPDX-FileCopyrightText: 2024 Shun Sakai +// +// SPDX-License-Identifier: Apache-2.0 OR MIT + += Contribution Guide +:git-flow-url: https://nvie.com/posts/a-successful-git-branching-model/ +:commit-messages-guide-url: https://github.com/RomuloOliveira/commit-messages-guide +:conventionalcommits-url: https://www.conventionalcommits.org/en/v1.0.0/ + +Thank you for your interest in contributing to this project! If you would like +to contribute to this project, please follow the instructions below if possible. + +== Branching model + +The branching model of this project is based on the {git-flow-url}[git-flow]. + +== Style guides + +=== Commit message + +Please see the {commit-messages-guide-url}[Commit messages guide] and the +{conventionalcommits-url}[Conventional Commits]. + +== Submitting a pull request + +. Create a working branch from the `develop` branch. The branch name should be + something other than `develop` or `master`. +. Create your patch. If your change is a feature or a bugfix, please add a test + case if possible. Note that the change must pass the CI. +. Please update the copyright information if possible. This project is + compliant with version 3.2 of the + https://reuse.software/spec/[_REUSE Specification_]. + https://github.com/fsfe/reuse-tool[`reuse`] is useful for updating the + copyright information. +. Please update the link:CHANGELOG.adoc[Changelog] if possible. +. Please read and agree to follow the link:CODE_OF_CONDUCT.md[Code of Conduct]. + +== Development + +=== Useful development tools + +The https://github.com/casey/just[just] command runner can be used. Run +`just --list` for more details. + +.Run tests +[source,sh] +---- +just test +---- + +.Run the formatter +[source,sh] +---- +just fmt +---- + +.Run the linter +[source,sh] +---- +just lint +---- diff --git a/vendor/github.com/sorairolake/lzip-go/LICENSE b/vendor/github.com/sorairolake/lzip-go/LICENSE new file mode 100644 index 00000000..eec8edd3 --- /dev/null +++ b/vendor/github.com/sorairolake/lzip-go/LICENSE @@ -0,0 +1,225 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+
+---
+
+MIT License
+
+Copyright (c) 2024 Shun Sakai
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/sorairolake/lzip-go/README.md b/vendor/github.com/sorairolake/lzip-go/README.md
new file mode 100644
index 00000000..16812875
--- /dev/null
+++ b/vendor/github.com/sorairolake/lzip-go/README.md
@@ -0,0 +1,119 @@
+
+
+# lzip-go
+
+[![CI][ci-badge]][ci-url]
+[![Go Reference][reference-badge]][reference-url]
+![Go version][go-version-badge]
+
+**lzip-go** is an implementation of the [lzip compressed format] written in
+pure [Go].
+
+This package supports reading and writing of lzip compressed streams.
+
+## Usage
+
+To install this package:
+
+```sh
+go get -u github.com/sorairolake/lzip-go
+```
+
+### Example
+
+Please see [`example_test.go`], or the round-trip sketch at the end of this
+file.
+
+### Documentation
+
+See the [documentation][reference-url] for more details.
+
+## Command-line utility
+
+This package includes a simple command-line utility for reading and writing
+lzip-compressed files.
+
+### Installation
+
+#### From source
+
+```sh
+go install github.com/sorairolake/lzip-go/cmd/glzip@latest
+```
+
+#### From binaries
+
+The [release page] contains pre-built binaries for Linux, macOS, Windows, and
+others.
+
+#### How to build
+
+To build the command-line utility:
+
+```sh
+just build-cmd
+```
+
+To build a man page:
+
+```sh
+just build-man
+```
+
+The man page is generated in `docs/man/man1`. Note that [Asciidoctor] is
+required when building the man page.
+
+### Usage
+
+Please see [`glzip(1)`].
+
+## Minimum Go version
+
+This package requires Go 1.22 or later.
+
+## Changelog
+
+Please see [CHANGELOG.adoc].
+
+## Contributing
+
+Please see [CONTRIBUTING.adoc].
+
+## Acknowledgment
+
+The API of this package is based on the [`compress/gzip`] package.
+
+This package uses the [`github.com/ulikunitz/xz/lzma`] package to encode and
+decode LZMA streams.
+
+## License
+
+Copyright © 2024 Shun Sakai (see [AUTHORS.adoc])
+
+This package is distributed under the terms of either the _Apache License 2.0_
+or the _MIT License_.
+
+This project is compliant with version 3.2 of the [_REUSE Specification_]. See
+copyright notices of individual files for more details on copyright and
+licensing information.
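+
+## Round-trip sketch
+
+The following is an illustrative round trip with the `gzip`-like API described
+above. It is a minimal sketch for orientation only; [`example_test.go`]
+remains the canonical example.
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"log"
+
+	lzip "github.com/sorairolake/lzip-go"
+)
+
+func main() {
+	var buf bytes.Buffer
+
+	// Compress: the header is written on the first Write, and the LZMA
+	// stream plus the 20-byte trailer are flushed on Close.
+	w := lzip.NewWriter(&buf)
+	if _, err := io.WriteString(w, "Hello, world!\n"); err != nil {
+		log.Fatal(err)
+	}
+	if err := w.Close(); err != nil {
+		log.Fatal(err)
+	}
+
+	// Decompress: the Reader verifies the CRC and the sizes stored in
+	// the trailer while reading.
+	r, err := lzip.NewReader(&buf)
+	if err != nil {
+		log.Fatal(err)
+	}
+	data, err := io.ReadAll(r)
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Print(string(data))
+}
+```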
+
+[ci-badge]: https://img.shields.io/github/actions/workflow/status/sorairolake/lzip-go/CI.yaml?branch=develop&style=for-the-badge&logo=github&label=CI
+[ci-url]: https://github.com/sorairolake/lzip-go/actions?query=branch%3Adevelop+workflow%3ACI++
+[reference-badge]: https://img.shields.io/badge/Go-Reference-steelblue?style=for-the-badge&logo=go
+[reference-url]: https://pkg.go.dev/github.com/sorairolake/lzip-go
+[go-version-badge]: https://img.shields.io/github/go-mod/go-version/sorairolake/lzip-go?style=for-the-badge&logo=go
+[lzip compressed format]: https://www.nongnu.org/lzip/manual/lzip_manual.html#File-format
+[Go]: https://go.dev/
+[`example_test.go`]: example_test.go
+[release page]: https://github.com/sorairolake/lzip-go/releases
+[Asciidoctor]: https://asciidoctor.org/
+[`glzip(1)`]: docs/man/man1/glzip.1.adoc
+[CHANGELOG.adoc]: CHANGELOG.adoc
+[CONTRIBUTING.adoc]: CONTRIBUTING.adoc
+[`compress/gzip`]: https://pkg.go.dev/compress/gzip
+[`github.com/ulikunitz/xz/lzma`]: https://pkg.go.dev/github.com/ulikunitz/xz/lzma
+[AUTHORS.adoc]: AUTHORS.adoc
+[_REUSE Specification_]: https://reuse.software/spec/
diff --git a/vendor/github.com/sorairolake/lzip-go/error.go b/vendor/github.com/sorairolake/lzip-go/error.go
new file mode 100644
index 00000000..900a8fd9
--- /dev/null
+++ b/vendor/github.com/sorairolake/lzip-go/error.go
@@ -0,0 +1,107 @@
+// SPDX-FileCopyrightText: 2024 Shun Sakai
+//
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+package lzip
+
+import "errors"
+
+// ErrInvalidMagic represents an error due to an invalid magic number.
+var ErrInvalidMagic = errors.New("lzip: invalid magic number")
+
+// UnsupportedVersionError represents an error due to the version number
+// stored in the header indicating an lzip format version that is not
+// supported by this package.
+type UnsupportedVersionError struct {
+	// Version represents the obtained version number.
+	Version byte
+}
+
+// Error returns a string representation of an [UnsupportedVersionError].
+func (e *UnsupportedVersionError) Error() string {
+	return "lzip: unsupported version number"
+}
+
+// UnknownVersionError represents an error due to the version number stored in
+// the header not being recognized by this package.
+type UnknownVersionError struct {
+	// Version represents the obtained version number.
+	Version byte
+}
+
+// Error returns a string representation of an [UnknownVersionError].
+func (e *UnknownVersionError) Error() string {
+	return "lzip: unknown version number"
+}
+
+// DictSizeTooSmallError represents an error due to the dictionary size being
+// smaller than 4 KiB.
+type DictSizeTooSmallError struct {
+	// DictSize represents the obtained dictionary size.
+	DictSize uint32
+}
+
+// Error returns a string representation of a [DictSizeTooSmallError].
+func (e *DictSizeTooSmallError) Error() string {
+	return "lzip: dictionary size is too small"
+}
+
+// DictSizeTooLargeError represents an error due to the dictionary size being
+// larger than 512 MiB.
+type DictSizeTooLargeError struct {
+	// DictSize represents the obtained dictionary size.
+	DictSize uint32
+}
+
+// Error returns a string representation of a [DictSizeTooLargeError].
+func (e *DictSizeTooLargeError) Error() string {
+	return "lzip: dictionary size is too large"
+}
+
+// InvalidCRCError represents an error due to a mismatch between the CRC of
+// the original uncompressed data and the CRC stored in the trailer.
+type InvalidCRCError struct {
+	// CRC represents the obtained CRC.
+	CRC uint32
+}
+
+// Error returns a string representation of an [InvalidCRCError].
+func (e *InvalidCRCError) Error() string {
+	return "lzip: CRC mismatch"
+}
+
+// InvalidDataSizeError represents an error due to a mismatch between the size
+// of the original uncompressed data stored in the trailer and the actual size
+// of the data.
+type InvalidDataSizeError struct {
+	// DataSize represents the obtained data size.
+	DataSize uint64
+}
+
+// Error returns a string representation of an [InvalidDataSizeError].
+func (e *InvalidDataSizeError) Error() string {
+	return "lzip: data size mismatch"
+}
+
+// InvalidMemberSizeError represents an error due to a mismatch between the
+// total member size stored in the trailer and the actual total size of the
+// member.
+type InvalidMemberSizeError struct {
+	// MemberSize represents the obtained member size.
+	MemberSize uint64
+}
+
+// Error returns a string representation of an [InvalidMemberSizeError].
+func (e *InvalidMemberSizeError) Error() string {
+	return "lzip: member size mismatch"
+}
+
+// MemberSizeTooLargeError represents an error due to the member size being
+// larger than 2 PiB.
+type MemberSizeTooLargeError struct {
+	// MemberSize represents the obtained member size.
+	MemberSize uint64
+}
+
+// Error returns a string representation of a [MemberSizeTooLargeError].
+func (e *MemberSizeTooLargeError) Error() string {
+	return "lzip: member size is too large"
+}
diff --git a/vendor/github.com/sorairolake/lzip-go/go.sum.license b/vendor/github.com/sorairolake/lzip-go/go.sum.license
new file mode 100644
index 00000000..df26b1a7
--- /dev/null
+++ b/vendor/github.com/sorairolake/lzip-go/go.sum.license
@@ -0,0 +1,3 @@
+SPDX-FileCopyrightText: 2024 Shun Sakai
+
+SPDX-License-Identifier: Apache-2.0 OR MIT
diff --git a/vendor/github.com/sorairolake/lzip-go/justfile b/vendor/github.com/sorairolake/lzip-go/justfile
new file mode 100644
index 00000000..11931034
--- /dev/null
+++ b/vendor/github.com/sorairolake/lzip-go/justfile
@@ -0,0 +1,67 @@
+# SPDX-FileCopyrightText: 2024 Shun Sakai
+#
+# SPDX-License-Identifier: Apache-2.0 OR MIT
+
+alias all := default
+alias build-cmd := build-cmd-debug
+
+# Run default recipe
+default: test
+
+# Remove generated artifacts
+@clean:
+    go clean
+
+# Run tests
+@test:
+    go test ./...
+
+# Run `golangci-lint run`
+@golangci-lint:
+    golangci-lint run -E gofmt,goimports
+
+# Run the formatter
+fmt: gofmt goimports
+
+# Run `go fmt`
+@gofmt:
+    go fmt ./...
+
+# Run `goimports`
+@goimports:
+    fd -e go -x goimports -w
+
+# Run the linter
+lint: vet staticcheck
+
+# Run `go vet`
+@vet:
+    go vet ./...
+
+# Run `staticcheck`
+@staticcheck:
+    staticcheck ./...
+
+# Build `glzip` command in debug mode
+@build-cmd-debug $CGO_ENABLED="0":
+    go build ./cmd/glzip
+
+# Build `glzip` command in release mode
+@build-cmd-release $CGO_ENABLED="0":
+    go build -ldflags="-s -w" -trimpath ./cmd/glzip
+
+# Build `glzip(1)`
+@build-man:
+    asciidoctor -b manpage docs/man/man1/glzip.1.adoc
+
+# Run the linter for GitHub Actions workflow files
+@lint-github-actions:
+    actionlint -verbose
+
+# Run the formatter for the README
+@fmt-readme:
+    npx prettier -w README.md
+
+# Increment the version
+@bump part:
+    bump-my-version bump {{part}}
diff --git a/vendor/github.com/sorairolake/lzip-go/lzip.go b/vendor/github.com/sorairolake/lzip-go/lzip.go
new file mode 100644
index 00000000..d0938eed
--- /dev/null
+++ b/vendor/github.com/sorairolake/lzip-go/lzip.go
@@ -0,0 +1,76 @@
+// SPDX-FileCopyrightText: 2024 Shun Sakai
+//
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+// Package lzip implements reading and writing of lzip format compressed files.
+// The package supports version 1 of the specification.
+//
+// See the following for the specification:
+//
+// - https://www.nongnu.org/lzip/manual/lzip_manual.html#File-format
+// - https://datatracker.ietf.org/doc/html/draft-diaz-lzip-09#section-2
+package lzip
+
+import (
+	"math/bits"
+
+	"github.com/ulikunitz/xz/lzma"
+)
+
+const (
+	headerSize  = 6
+	trailerSize = 20
+)
+
+const magic = "LZIP"
+const magicSize = 4
+
+type version byte
+
+const (
+	version0 version = iota
+	version1
+)
+
+// MinDictSize is the minimum dictionary size, which is 4 KiB.
+const MinDictSize = lzma.MinDictCap
+
+// MaxDictSize is the maximum dictionary size, which is 512 MiB.
+const MaxDictSize = 1 << 29
+
+// DefaultDictSize is the default dictionary size, which is 8 MiB.
+const DefaultDictSize = 1 << 23
+
+// MaxMemberSize is the maximum member size, which is 2 PiB.
+const MaxMemberSize = 1 << 51
+
+type header struct {
+	magic [magicSize]byte
+	version
+	dictSize byte
+}
+
+func newHeader(dictSize uint32) *header {
+	ds := bits.Len32(dictSize - 1)
+
+	// The coded dictionary size is the bit length in the low 5 bits,
+	// minus up to 7 sixteenths of 2^bits in the high 3 bits. Pick the
+	// largest fraction that still keeps the coded size >= dictSize.
+	if dictSize > MinDictSize {
+		base := 1 << ds
+		frac := base / 16
+
+		for i := 7; i >= 1; i-- {
+			if (base - (i * frac)) >= int(dictSize) {
+				ds |= i << 5
+
+				break
+			}
+		}
+	}
+
+	z := &header{[magicSize]byte([]byte(magic)), version1, byte(ds)}
+
+	return z
+}
+
+type trailer struct {
+	crc        uint32
+	dataSize   uint64
+	memberSize uint64
+}
diff --git a/vendor/github.com/sorairolake/lzip-go/reader.go b/vendor/github.com/sorairolake/lzip-go/reader.go
new file mode 100644
index 00000000..af14464e
--- /dev/null
+++ b/vendor/github.com/sorairolake/lzip-go/reader.go
@@ -0,0 +1,124 @@
+// SPDX-FileCopyrightText: 2024 Shun Sakai
+//
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+package lzip
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"hash/crc32"
+	"io"
+	"slices"
+
+	"github.com/ulikunitz/xz/lzma"
+)
+
+// Reader is an [io.Reader] that can be read to retrieve uncompressed data from
+// a lzip-format compressed file.
+type Reader struct {
+	r            io.Reader
+	decompressor *lzma.Reader
+	trailer
+}
+
+// NewReader creates a new [Reader] reading the given reader.
+func NewReader(r io.Reader) (*Reader, error) {
+	z := new(Reader)
+
+	var header [headerSize]byte
+	// Use io.ReadFull so a short read cannot yield a partial header.
+	if _, err := io.ReadFull(r, header[:]); err != nil {
+		return nil, err
+	}
+
+	if !slices.Equal(header[:magicSize], []byte(magic)) {
+		return nil, ErrInvalidMagic
+	}
+
+	switch v := header[4]; v {
+	case 0:
+		return nil, &UnsupportedVersionError{v}
+	case 1:
+	default:
+		return nil, &UnknownVersionError{v}
+	}
+
+	dictSize := uint32(1 << (header[5] & 0x1f))
+	dictSize -= (dictSize / 16) * uint32((header[5]>>5)&0x07)
+
+	switch {
+	case dictSize < MinDictSize:
+		return nil, &DictSizeTooSmallError{dictSize}
+	case dictSize > MaxDictSize:
+		return nil, &DictSizeTooLargeError{dictSize}
+	}
+
+	rb, err := io.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+
+	var lzmaHeader [lzma.HeaderLen]byte
+	lzmaHeader[0] = lzma.Properties{LC: 3, LP: 0, PB: 2}.Code()
+	binary.LittleEndian.PutUint32(lzmaHeader[1:5], dictSize)
+	copy(lzmaHeader[5:], rb[len(rb)-16:len(rb)-8])
+
+	z.trailer.memberSize = uint64(headerSize + len(rb))
+	if memberSize := z.trailer.memberSize; memberSize > MaxMemberSize {
+		return nil, &MemberSizeTooLargeError{memberSize}
+	}
+
+	rb = slices.Concat(lzmaHeader[:], rb)
+
+	r = bytes.NewReader(rb)
+
+	z.decompressor, err = lzma.NewReader(r)
+	if err != nil {
+		return nil, err
+	}
+
+	z.r = r
+
+	return z, nil
+}
+
+// Read reads uncompressed data from the stream.
+func (z *Reader) Read(p []byte) (n int, err error) {
+	for n == 0 {
+		n, err = z.decompressor.Read(p)
+
+		z.trailer.crc = crc32.Update(z.trailer.crc, crc32.IEEETable, p[:n])
+		z.trailer.dataSize += uint64(n)
+
+		// Anything other than io.EOF is returned as-is; io.EOF means the
+		// LZMA stream ended and the 20-byte trailer should follow.
+		if !errors.Is(err, io.EOF) {
+			return n, err
+		}
+
+		var trailer [trailerSize]byte
+		if _, err := io.ReadFull(z.r, trailer[:]); err != nil {
+			return n, err
+		}
+
+		crc := binary.LittleEndian.Uint32(trailer[:4])
+		if crc != z.trailer.crc {
+			return n, &InvalidCRCError{crc}
+		}
+
+		dataSize := binary.LittleEndian.Uint64(trailer[4:12])
+		if dataSize != z.trailer.dataSize {
+			return n, &InvalidDataSizeError{dataSize}
+		}
+
+		memberSize := binary.LittleEndian.Uint64(trailer[12:])
+		if memberSize != z.trailer.memberSize {
+			return n, &InvalidMemberSizeError{memberSize}
+		}
+	}
+
+	return n, nil
+}
diff --git a/vendor/github.com/sorairolake/lzip-go/writer.go b/vendor/github.com/sorairolake/lzip-go/writer.go
new file mode 100644
index 00000000..4f1777f3
--- /dev/null
+++ b/vendor/github.com/sorairolake/lzip-go/writer.go
@@ -0,0 +1,147 @@
+// SPDX-FileCopyrightText: 2024 Shun Sakai
+//
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+package lzip
+
+import (
+	"bytes"
+	"encoding/binary"
+	"hash/crc32"
+	"io"
+
+	"github.com/ulikunitz/xz/lzma"
+)
+
+// Writer is an [io.WriteCloser] that can be written to retrieve a lzip-format
+// compressed file from data.
+type Writer struct {
+	w           io.Writer
+	compressor  *lzma.Writer
+	buf         bytes.Buffer
+	header      *header
+	wroteHeader bool
+	trailer
+	closed bool
+}
+
+// WriterOptions configures [Writer].
+type WriterOptions struct {
+	// DictSize sets the dictionary size.
+	DictSize uint32
+}
+
+func newWriterOptions() *WriterOptions {
+	opt := &WriterOptions{DefaultDictSize}
+
+	return opt
+}
+
+// Verify checks if [WriterOptions] is valid.
+func (o *WriterOptions) Verify() error {
+	switch dictSize := o.DictSize; {
+	case dictSize < MinDictSize:
+		return &DictSizeTooSmallError{dictSize}
+	case dictSize > MaxDictSize:
+		return &DictSizeTooLargeError{dictSize}
+	}
+
+	return nil
+}
+
+// NewWriter creates a new [Writer] writing the given writer.
+//
+// This uses the default parameters.
+func NewWriter(w io.Writer) *Writer {
+	opt := newWriterOptions()
+
+	z, err := NewWriterOptions(w, opt)
+	if err != nil {
+		panic(err)
+	}
+
+	return z
+}
+
+// NewWriterOptions creates a new [Writer] writing the given writer.
+//
+// This uses the given [WriterOptions].
+func NewWriterOptions(w io.Writer, opt *WriterOptions) (*Writer, error) {
+	if err := opt.Verify(); err != nil {
+		return nil, err
+	}
+
+	z := &Writer{w: w}
+
+	compressor, err := lzma.WriterConfig{DictCap: int(opt.DictSize)}.NewWriter(&z.buf)
+	if err != nil {
+		return nil, err
+	}
+
+	z.compressor = compressor
+
+	header := newHeader(opt.DictSize)
+	z.header = header
+
+	return z, nil
+}
+
+// Write compresses the given uncompressed data.
+func (z *Writer) Write(p []byte) (int, error) {
+	if !z.wroteHeader {
+		z.wroteHeader = true
+
+		var header [headerSize]byte
+
+		copy(header[:magicSize], z.header.magic[:])
+		header[4] = byte(z.header.version)
+		header[5] = z.header.dictSize
+
+		if _, err := z.w.Write(header[:]); err != nil {
+			return 0, err
+		}
+	}
+
+	n, err := z.compressor.Write(p)
+	if err != nil {
+		return n, err
+	}
+
+	z.trailer.crc = crc32.Update(z.trailer.crc, crc32.IEEETable, p)
+	z.trailer.dataSize += uint64(len(p))
+
+	return n, nil
+}
+
+// Close closes the [Writer], writing the lzip trailer. It does not close
+// the underlying [io.Writer].
+func (z *Writer) Close() error { + if z.closed { + return nil + } + + z.closed = true + + if err := z.compressor.Close(); err != nil { + return err + } + + cb := z.buf.Bytes()[lzma.HeaderLen:] + if _, err := z.w.Write(cb); err != nil { + return err + } + + var trailer [trailerSize]byte + + binary.LittleEndian.PutUint32(trailer[:4], z.trailer.crc) + binary.LittleEndian.PutUint64(trailer[4:12], z.trailer.dataSize) + binary.LittleEndian.PutUint64(trailer[12:], headerSize+uint64(len(cb))+trailerSize) + + if memberSize := binary.LittleEndian.Uint64(trailer[12:]); memberSize > MaxMemberSize { + return &MemberSizeTooLargeError{memberSize} + } + + _, err := z.w.Write(trailer[:]) + + return err +} diff --git a/vendor/go4.org/syncutil/gate.go b/vendor/go4.org/syncutil/gate.go new file mode 100644 index 00000000..e4592be9 --- /dev/null +++ b/vendor/go4.org/syncutil/gate.go @@ -0,0 +1,41 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package syncutil + +// A Gate limits concurrency. +type Gate struct { + c chan struct{} +} + +// NewGate returns a new gate that will only permit max operations at once. +func NewGate(max int) *Gate { + return &Gate{make(chan struct{}, max)} +} + +// Start starts an operation, blocking until the gate has room. +func (g *Gate) Start() { + g.c <- struct{}{} +} + +// Done finishes an operation. +func (g *Gate) Done() { + select { + case <-g.c: + default: + panic("Done called more than Start") + } +} diff --git a/vendor/go4.org/syncutil/group.go b/vendor/go4.org/syncutil/group.go new file mode 100644 index 00000000..dacef4c4 --- /dev/null +++ b/vendor/go4.org/syncutil/group.go @@ -0,0 +1,64 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package syncutil + +import "sync" + +// A Group is like a sync.WaitGroup and coordinates doing +// multiple things at once. Its zero value is ready to use. +type Group struct { + wg sync.WaitGroup + mu sync.Mutex // guards errs + errs []error +} + +// Go runs fn in its own goroutine, but does not wait for it to complete. +// Call Err or Errs to wait for all the goroutines to complete. +func (g *Group) Go(fn func() error) { + g.wg.Add(1) + go func() { + defer g.wg.Done() + err := fn() + if err != nil { + g.mu.Lock() + defer g.mu.Unlock() + g.errs = append(g.errs, err) + } + }() +} + +// Wait waits for all the previous calls to Go to complete. +func (g *Group) Wait() { + g.wg.Wait() +} + +// Err waits for all previous calls to Go to complete and returns the +// first non-nil error, or nil. 
+func (g *Group) Err() error { + g.wg.Wait() + if len(g.errs) > 0 { + return g.errs[0] + } + return nil +} + +// Errs waits for all previous calls to Go to complete and returns +// all non-nil errors. +func (g *Group) Errs() []error { + g.wg.Wait() + return g.errs +} diff --git a/vendor/go4.org/syncutil/once.go b/vendor/go4.org/syncutil/once.go new file mode 100644 index 00000000..cd276cec --- /dev/null +++ b/vendor/go4.org/syncutil/once.go @@ -0,0 +1,60 @@ +/* +Copyright 2014 The Perkeep Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package syncutil + +import ( + "sync" + "sync/atomic" +) + +// A Once will perform a successful action exactly once. +// +// Unlike a sync.Once, this Once's func returns an error +// and is re-armed on failure. +type Once struct { + m sync.Mutex + done uint32 +} + +// Do calls the function f if and only if Do has not been invoked +// without error for this instance of Once. In other words, given +// var once Once +// if once.Do(f) is called multiple times, only the first call will +// invoke f, even if f has a different value in each invocation unless +// f returns an error. A new instance of Once is required for each +// function to execute. +// +// Do is intended for initialization that must be run exactly once. Since f +// is niladic, it may be necessary to use a function literal to capture the +// arguments to a function to be invoked by Do: +// err := config.once.Do(func() error { return config.init(filename) }) +func (o *Once) Do(f func() error) error { + if atomic.LoadUint32(&o.done) == 1 { + return nil + } + // Slow-path. + o.m.Lock() + defer o.m.Unlock() + var err error + if o.done == 0 { + err = f() + if err == nil { + atomic.StoreUint32(&o.done, 1) + } + } + return err +} diff --git a/vendor/go4.org/syncutil/sem.go b/vendor/go4.org/syncutil/sem.go new file mode 100644 index 00000000..092655ff --- /dev/null +++ b/vendor/go4.org/syncutil/sem.go @@ -0,0 +1,64 @@ +package syncutil + +import ( + "fmt" + "log" + "sync" +) + +type debugT bool + +var debug = debugT(false) + +func (d debugT) Printf(format string, args ...interface{}) { + if bool(d) { + log.Printf(format, args...) + } +} + +// Sem implements a semaphore that can have multiple units acquired/released +// at a time. +type Sem struct { + c *sync.Cond // Protects size + max, free int64 +} + +// NewSem creates a semaphore with max units available for acquisition. +func NewSem(max int64) *Sem { + return &Sem{ + c: sync.NewCond(new(sync.Mutex)), + free: max, + max: max, + } +} + +// Acquire will deduct n units from the semaphore. If the deduction would +// result in the available units falling below zero, the call will block until +// another go routine returns units via a call to Release. If more units are +// requested than the semaphore is configured to hold, error will be non-nil. 
+func (s *Sem) Acquire(n int64) error { + if n > s.max { + return fmt.Errorf("sem: attempt to acquire more units than semaphore size %d > %d", n, s.max) + } + s.c.L.Lock() + defer s.c.L.Unlock() + for { + debug.Printf("Acquire check max %d free %d, n %d", s.max, s.free, n) + if s.free >= n { + s.free -= n + return nil + } + debug.Printf("Acquire Wait max %d free %d, n %d", s.max, s.free, n) + s.c.Wait() + } +} + +// Release will return n units to the semaphore and notify any currently +// blocking Acquire calls. +func (s *Sem) Release(n int64) { + s.c.L.Lock() + defer s.c.L.Unlock() + debug.Printf("Release max %d free %d, n %d", s.max, s.free, n) + s.free += n + s.c.Broadcast() +} diff --git a/vendor/go4.org/syncutil/syncutil.go b/vendor/go4.org/syncutil/syncutil.go new file mode 100644 index 00000000..851aebd2 --- /dev/null +++ b/vendor/go4.org/syncutil/syncutil.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 The Perkeep Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package syncutil provides various synchronization utilities. +package syncutil // import "go4.org/syncutil" diff --git a/vendor/golang.org/x/text/LICENSE b/vendor/golang.org/x/text/LICENSE index 6a66aea5..2a7cf70d 100644 --- a/vendor/golang.org/x/text/LICENSE +++ b/vendor/golang.org/x/text/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
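The newly vendored `go4.org/syncutil` primitives above compose naturally: a
`Gate` bounds how many operations run at once, while a `Group` collects their
errors. A minimal sketch of hypothetical usage (not code from this patch):

```go
package main

import (
	"fmt"
	"log"

	"go4.org/syncutil"
)

func main() {
	gate := syncutil.NewGate(3) // at most 3 tasks in flight
	var group syncutil.Group

	for i := 0; i < 10; i++ {
		i := i
		gate.Start() // blocks until the gate has room
		group.Go(func() error {
			defer gate.Done()
			if i == 7 {
				return fmt.Errorf("task %d failed", i)
			}
			return nil
		})
	}

	// Err waits for all goroutines and returns the first non-nil error.
	if err := group.Err(); err != nil {
		log.Println(err)
	}
}
```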
diff --git a/vendor/modules.txt b/vendor/modules.txt index 5b8ec1c6..4a18029b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -43,6 +43,9 @@ github.com/Microsoft/hcsshim/internal/vmcompute github.com/Microsoft/hcsshim/internal/wclayer github.com/Microsoft/hcsshim/internal/winapi github.com/Microsoft/hcsshim/osversion +# github.com/STARRY-S/zip v0.1.0 +## explicit; go 1.21.3 +github.com/STARRY-S/zip # github.com/VividCortex/ewma v1.2.0 ## explicit; go 1.12 github.com/VividCortex/ewma @@ -52,20 +55,22 @@ github.com/acarl005/stripansi # github.com/alecthomas/kong v1.6.0 ## explicit; go 1.20 github.com/alecthomas/kong -# github.com/andybalholm/brotli v1.0.4 -## explicit; go 1.12 +# github.com/andybalholm/brotli v1.1.1 +## explicit; go 1.13 github.com/andybalholm/brotli +github.com/andybalholm/brotli/matchfinder # github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 ## explicit; go 1.13 github.com/asaskevich/govalidator -# github.com/bodgit/plumbing v1.2.0 +# github.com/bodgit/plumbing v1.3.0 ## explicit; go 1.13 github.com/bodgit/plumbing -# github.com/bodgit/sevenzip v1.3.0 -## explicit; go 1.17 +# github.com/bodgit/sevenzip v1.5.2 +## explicit; go 1.19 github.com/bodgit/sevenzip github.com/bodgit/sevenzip/internal/aes7z github.com/bodgit/sevenzip/internal/bcj2 +github.com/bodgit/sevenzip/internal/bra github.com/bodgit/sevenzip/internal/brotli github.com/bodgit/sevenzip/internal/bzip2 github.com/bodgit/sevenzip/internal/deflate @@ -76,12 +81,9 @@ github.com/bodgit/sevenzip/internal/lzma2 github.com/bodgit/sevenzip/internal/pool github.com/bodgit/sevenzip/internal/util github.com/bodgit/sevenzip/internal/zstd -# github.com/bodgit/windows v1.0.0 +# github.com/bodgit/windows v1.0.1 ## explicit; go 1.13 github.com/bodgit/windows -# github.com/connesc/cipherio v0.2.1 -## explicit; go 1.14 -github.com/connesc/cipherio # github.com/containerd/cgroups/v3 v3.0.3 ## explicit; go 1.18 github.com/containerd/cgroups/v3/cgroup1/stats @@ -281,7 +283,7 @@ github.com/docker/go-connections/tlsconfig # github.com/docker/go-units v0.5.0 ## explicit github.com/docker/go-units -# github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 +# github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 ## explicit; go 1.9 github.com/dsnet/compress github.com/dsnet/compress/bzip2 @@ -373,20 +375,26 @@ github.com/hashicorp/errwrap # github.com/hashicorp/go-multierror v1.1.1 ## explicit; go 1.13 github.com/hashicorp/go-multierror +# github.com/hashicorp/golang-lru/v2 v2.0.7 +## explicit; go 1.18 +github.com/hashicorp/golang-lru/v2 +github.com/hashicorp/golang-lru/v2/internal +github.com/hashicorp/golang-lru/v2/simplelru # github.com/josharian/intern v1.0.0 ## explicit; go 1.5 github.com/josharian/intern # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/klauspost/compress v1.17.8 -## explicit; go 1.20 +# github.com/klauspost/compress v1.17.11 +## explicit; go 1.21 github.com/klauspost/compress github.com/klauspost/compress/flate github.com/klauspost/compress/fse github.com/klauspost/compress/gzip github.com/klauspost/compress/huff0 github.com/klauspost/compress/internal/cpuinfo +github.com/klauspost/compress/internal/godebug github.com/klauspost/compress/internal/snapref github.com/klauspost/compress/zip github.com/klauspost/compress/zlib @@ -420,8 +428,8 @@ github.com/mattn/go-runewidth # github.com/mattn/go-sqlite3 v1.14.22 ## explicit; go 1.19 github.com/mattn/go-sqlite3 -# github.com/mholt/archiver/v4 v4.0.0-alpha.8 -## explicit; go 
1.18 +# github.com/mholt/archiver/v4 v4.0.0-alpha.9 +## explicit; go 1.22.2 github.com/mholt/archiver/v4 # github.com/miekg/pkcs11 v1.1.1 ## explicit; go 1.12 @@ -447,7 +455,7 @@ github.com/modern-go/concurrent # github.com/modern-go/reflect2 v1.0.2 ## explicit; go 1.12 github.com/modern-go/reflect2 -# github.com/nwaples/rardecode/v2 v2.0.0-beta.2 +# github.com/nwaples/rardecode/v2 v2.0.0-beta.4 ## explicit; go 1.16 github.com/nwaples/rardecode/v2 # github.com/oklog/ulid v1.3.1 @@ -473,7 +481,7 @@ github.com/opencontainers/selinux/pkg/pwalkdir ## explicit github.com/ostreedev/ostree-go/pkg/glibobject github.com/ostreedev/ostree-go/pkg/otbuiltin -# github.com/pierrec/lz4/v4 v4.1.15 +# github.com/pierrec/lz4/v4 v4.1.21 ## explicit; go 1.14 github.com/pierrec/lz4/v4 github.com/pierrec/lz4/v4/internal/lz4block @@ -517,6 +525,9 @@ github.com/sigstore/sigstore/pkg/signature/payload # github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus +# github.com/sorairolake/lzip-go v0.3.5 +## explicit; go 1.22 +github.com/sorairolake/lzip-go # github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 ## explicit; go 1.19 github.com/stefanberger/go-pkcs11uri @@ -600,9 +611,10 @@ go.opentelemetry.io/otel/metric/embedded ## explicit; go 1.20 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded -# go4.org v0.0.0-20200411211856-f5505b9728dd +# go4.org v0.0.0-20230225012048-214862532bf5 ## explicit; go 1.13 go4.org/readerutil +go4.org/syncutil # golang.org/x/crypto v0.23.0 ## explicit; go 1.18 golang.org/x/crypto/cast5 @@ -646,7 +658,7 @@ golang.org/x/sys/windows/registry # golang.org/x/term v0.20.0 ## explicit; go 1.18 golang.org/x/term -# golang.org/x/text v0.15.0 +# golang.org/x/text v0.19.0 ## explicit; go 1.18 golang.org/x/text/encoding golang.org/x/text/encoding/charmap